diff --git a/kairon/nlu/classifiers/llm.py b/kairon/nlu/classifiers/llm.py
index f438c36de..39cec4b8c 100644
--- a/kairon/nlu/classifiers/llm.py
+++ b/kairon/nlu/classifiers/llm.py
@@ -82,9 +82,9 @@ def load_api_key(self, bot_id: Text):
         if bot_id:
             from kairon.shared.admin.processor import Sysadmin
             llm_secret = Sysadmin.get_llm_secret("openai", bot_id)
-            self.api_key = llm_secret.get('api_key')
+            self.secret = {'api_key': llm_secret.get('api_key')}
         elif os.environ.get("LLM_API_KEY"):
-            self.api_key = os.environ.get("LLM_API_KEY")
+            self.secret = {'api_key': os.environ.get("LLM_API_KEY")}
         else:
             raise KeyError(
                 f"either set bot_id'in LLMClassifier config or set LLM_API_KEY in environment variables"
@@ -92,7 +92,7 @@ def load_api_key(self, bot_id: Text):
 
     def get_embeddings(self, text):
         embeddings = litellm.embedding(
-            model="text-embedding-3-small", input=text, api_key=self.api_key, max_retries=3
+            model="text-embedding-3-small", input=text, max_retries=3, **self.secret
         )
         return [ embedding['embedding'] for embedding in embeddings['data']]
 
@@ -188,8 +188,8 @@ def predict(self, text):
             top_p=1,
             frequency_penalty=0,
             presence_penalty=0,
-            api_key=self.api_key,
-            max_retries=3
+            max_retries=3,
+            **self.secret
         )
         logger.debug(response)
         responses = json.loads(response.choices[0]["message"]["content"])
diff --git a/kairon/train.py b/kairon/train.py
index aa19943cb..f4346a350 100644
--- a/kairon/train.py
+++ b/kairon/train.py
@@ -38,6 +38,11 @@ def train_model_for_bot(bot: str):
         stories = stories.merge(multiflow_stories[0])
     config = processor.load_config(bot)
     config['assistant_id'] = bot
+
+    index = next((i for i, d in enumerate(config['pipeline']) if d["name"] == "kairon.nlu.LLMClassifier"), None)
+    if index is not None:
+        config['pipeline'][index]['bot_id'] = bot
+
     rules = processor.get_rules_for_training(bot)
     rules = rules.merge(multiflow_stories[1])
 
@@ -124,3 +129,5 @@ def start_training(bot: str, user: str, token: str = None):
             exception=exception,
         )
     return model_file
+
+
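
The two changes work together: load_api_key now stores the credentials as a dict so they can be expanded into the litellm calls with **self.secret, and train_model_for_bot injects bot_id into the LLMClassifier entry of the pipeline config so the classifier can load that secret at training time. Below is a minimal standalone sketch of the intended behaviour; the config shape and the fake_embedding stand-in are illustrative assumptions, not kairon or litellm code.

# Illustrative sketch only: stand-ins for the objects touched by this diff.

# 1) Credentials kept as a dict, so they can be splatted into litellm keyword
#    arguments, e.g. litellm.embedding(model=..., input=text, max_retries=3, **secret).
secret = {"api_key": "sk-placeholder"}


def fake_embedding(model, input, max_retries, **kwargs):
    # Stand-in for litellm.embedding; confirms the api_key arrives as a keyword argument.
    return {"model": model, "got_api_key": "api_key" in kwargs}


print(fake_embedding(model="text-embedding-3-small", input="hi", max_retries=3, **secret))

# 2) Injecting bot_id into the LLMClassifier entry of an assumed pipeline config.
config = {
    "assistant_id": "demo-bot",
    "pipeline": [
        {"name": "WhitespaceTokenizer"},
        {"name": "kairon.nlu.LLMClassifier"},
    ],
}
bot = "demo-bot"

index = next(
    (i for i, d in enumerate(config["pipeline"]) if d["name"] == "kairon.nlu.LLMClassifier"),
    None,
)
if index is not None:  # 'is not None' so a classifier at position 0 is still updated
    config["pipeline"][index]["bot_id"] = bot

assert config["pipeline"][1]["bot_id"] == "demo-bot"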