Llm bot id classifier corrected #1646
```diff
@@ -82,17 +82,17 @@ def load_api_key(self, bot_id: Text):
         if bot_id:
             from kairon.shared.admin.processor import Sysadmin
             llm_secret = Sysadmin.get_llm_secret("openai", bot_id)
-            self.api_key = llm_secret.get('api_key')
+            self.secret = llm_secret.get('api_key')
         elif os.environ.get("LLM_API_KEY"):
-            self.api_key = os.environ.get("LLM_API_KEY")
+            self.secret = {'api_key': os.environ.get("LLM_API_KEY")}
         else:
             raise KeyError(
                 f"either set bot_id'in LLMClassifier config or set LLM_API_KEY in environment variables"
             )

     def get_embeddings(self, text):
         embeddings = litellm.embedding(
-            model="text-embedding-3-small", input=text, api_key=self.api_key, max_retries=3
+            model="text-embedding-3-small", input=text, max_retries=3, **self.secret
         )
```
Comment on lines +95 to 96:

🛠️ Refactor suggestion

**Use configuration for embedding model**

The embedding model name is hardcoded despite being available in the component configuration.

```diff
         embeddings = litellm.embedding(
-            model="text-embedding-3-small", input=text, max_retries=3, **self.secret
+            model=self.component_config.get("embedding_model", "text-embedding-3-small"),
+            input=text,
+            max_retries=self.component_config.get("retry", 3),
+            **self.secret
         )
```
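For illustration, a minimal sketch of how that suggestion could be wired end to end, assuming the classifier follows the usual Rasa custom-component convention of a `defaults` dict merged into `component_config`. The `embedding_model` and `retry` keys come from the suggestion above; the class skeleton itself is hypothetical:

```python
import litellm


class LLMClassifierSketch:
    # Defaults a user can override per bot in config.yml (assumed key names).
    defaults = {
        "embedding_model": "text-embedding-3-small",
        "retry": 3,
    }

    def __init__(self, component_config=None, secret=None):
        # User-supplied values win over the defaults.
        self.component_config = {**self.defaults, **(component_config or {})}
        self.secret = secret or {}

    def get_embeddings(self, text):
        embeddings = litellm.embedding(
            model=self.component_config["embedding_model"],
            input=text,
            max_retries=self.component_config["retry"],
            **self.secret,
        )
        return [embedding["embedding"] for embedding in embeddings["data"]]
```

With the merge done once in `__init__`, the `.get(..., default)` calls at the call site become plain lookups.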
```diff
         return [ embedding['embedding'] for embedding in embeddings['data']]
```
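The broader pattern behind this change: switching from a bare `api_key` string to a `secret` dict unpacked with `**` lets extra provider settings (for example an alternate `api_base`) flow through the same call site unchanged. A tiny self-contained illustration, with `fake_embedding` standing in for `litellm.embedding` and all values hypothetical:

```python
def fake_embedding(model, input, max_retries=3, api_key=None, api_base=None):
    # Stand-in for litellm.embedding, just to show the keyword expansion.
    return {"model": model, "api_key": api_key, "api_base": api_base}


secret = {"api_key": "sk-test", "api_base": "https://example.invalid/v1"}

# Expands to fake_embedding(..., api_key="sk-test", api_base="https://example.invalid/v1")
print(fake_embedding("text-embedding-3-small", ["hello"], **secret))
```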
```diff
@@ -188,8 +188,8 @@ def predict(self, text):
                 top_p=1,
                 frequency_penalty=0,
                 presence_penalty=0,
-                api_key=self.api_key,
-                max_retries=3
+                max_retries=3,
+                **self.secret
```
Comment on lines +191 to +192:

🛠️ Refactor suggestion

**Improve error handling in predict method**

The error handling could be more specific and informative.

```diff
                 max_retries=3,
                 **self.secret
             )
             logger.debug(response)
             responses = json.loads(response.choices[0]["message"]["content"])
             intent = responses["intent"] if "intent" in responses.keys() else "nlu_fallback"
             explanation = responses["explanation"] if "explanation" in responses.keys() else None
             entities = responses["entities"] if "entities" in responses.keys() else []
-        except Exception as e:
-            logger.error(e)
+        except json.JSONDecodeError as e:
+            logger.error(f"Failed to parse LLM response: {e}")
+        except (KeyError, IndexError) as e:
+            logger.error(f"Unexpected response format from LLM: {e}")
+        except Exception as e:
+            logger.error(f"Error during prediction: {str(e)}")
```
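To make the layered `except` clauses concrete, here is a hedged, self-contained sketch of the parsing step as a helper. `parse_llm_response` is a hypothetical function, the response shape follows the diff above, and `AttributeError` is one addition beyond the reviewer's list, to cover malformed response objects:

```python
import json
import logging

logger = logging.getLogger(__name__)


def parse_llm_response(response):
    """Defensively unpack an OpenAI-style chat completion (sketch)."""
    intent, explanation, entities = "nlu_fallback", None, []
    try:
        content = response.choices[0]["message"]["content"]
        parsed = json.loads(content)
        intent = parsed.get("intent", "nlu_fallback")
        explanation = parsed.get("explanation")
        entities = parsed.get("entities", [])
    except json.JSONDecodeError as e:
        # The model returned something that is not valid JSON.
        logger.error(f"Failed to parse LLM response: {e}")
    except (KeyError, IndexError, AttributeError) as e:
        # The response object did not have the expected shape.
        logger.error(f"Unexpected response format from LLM: {e}")
    return intent, explanation, entities


if __name__ == "__main__":
    from types import SimpleNamespace

    fake = SimpleNamespace(
        choices=[{"message": {"content": '{"intent": "greet", "entities": []}'}}]
    )
    print(parse_llm_response(fake))  # ('greet', None, [])
```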
```diff
             )
             logger.debug(response)
             responses = json.loads(response.choices[0]["message"]["content"])
```
```diff
@@ -38,6 +38,11 @@ def train_model_for_bot(bot: str):
         stories = stories.merge(multiflow_stories[0])
     config = processor.load_config(bot)
     config['assistant_id'] = bot

+    index = next((index for (index, d) in enumerate(config['pipeline']) if d["name"] == "kairon.nlu.LLMClassifier"), None)
+    if index:
+        config['pipeline'][index]['bot_id']= bot
```
Comment on lines +42 to +44:

🛠️ Refactor suggestion

**Add error handling for LLMClassifier configuration**

While the lookup itself works, `if index:` is falsy when the classifier sits at position 0 of the pipeline, so that entry would silently be skipped. Consider an explicit `is not None` check plus some error handling to make the code more robust:

```diff
-    index = next((index for (index, d) in enumerate(config['pipeline']) if d["name"] == "kairon.nlu.LLMClassifier"), None)
-    if index:
-        config['pipeline'][index]['bot_id']= bot
+    if not config.get('pipeline'):
+        raise ValueError("Pipeline configuration is missing")
+
+    index = next((index for (index, d) in enumerate(config['pipeline']) if d["name"] == "kairon.nlu.LLMClassifier"), None)
+    if index is not None:
+        config['pipeline'][index]['bot_id'] = bot
+    else:
+        logging.debug("LLMClassifier not found in pipeline configuration")
```
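The `is not None` detail is worth demonstrating, since `next(..., None)` returns a positional index and `0` is falsy. A runnable illustration with a hypothetical pipeline:

```python
pipeline = [
    {"name": "kairon.nlu.LLMClassifier"},  # classifier first -> index 0
    {"name": "WhitespaceTokenizer"},
]

index = next(
    (i for i, d in enumerate(pipeline) if d["name"] == "kairon.nlu.LLMClassifier"),
    None,
)

if index:
    # Buggy guard: never runs here, because index == 0 is falsy.
    pipeline[index]["bot_id"] = "my-bot"

if index is not None:
    # Correct guard: only skipped when the classifier is genuinely absent.
    pipeline[index]["bot_id"] = "my-bot"

assert pipeline[0]["bot_id"] == "my-bot"
```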
```diff
     rules = processor.get_rules_for_training(bot)
     rules = rules.merge(multiflow_stories[1])
```
```diff
@@ -124,3 +129,5 @@ def start_training(bot: str, user: str, token: str = None):
             exception=exception,
         )
     return model_file
```
Comment:

🛠️ Refactor suggestion

**Add validation for API key and type hints**

The API key handling could be more robust with proper validation and type hints.
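The committable suggestion attached to this comment is not visible above. Purely as a hedged sketch of what such validation and type hints might look like, and explicitly not the reviewer's actual suggestion: the version below stores the whole secret dict so that `**self.secret` stays valid in both branches, which is an assumption about `get_llm_secret`'s return shape.

```python
import os
from typing import Dict, Optional, Text


class LLMClassifierSketch:
    """Hypothetical fragment; only the method below is of interest."""

    secret: Dict[Text, Text]

    def load_api_key(self, bot_id: Optional[Text]) -> None:
        if bot_id:
            # Mirrors the PR's lazy import; needs a kairon environment to run.
            from kairon.shared.admin.processor import Sysadmin

            llm_secret = Sysadmin.get_llm_secret("openai", bot_id)
            if not llm_secret.get("api_key"):
                # Fail fast with a descriptive message instead of a later
                # authentication error deep inside litellm.
                raise ValueError(f"No OpenAI api_key configured for bot '{bot_id}'")
            self.secret = llm_secret
        elif os.environ.get("LLM_API_KEY"):
            self.secret = {"api_key": os.environ["LLM_API_KEY"]}
        else:
            raise KeyError(
                "either set bot_id in LLMClassifier config "
                "or set LLM_API_KEY in environment variables"
            )
```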