From d348cffbaee8176873683c4ee0a23fac02b803f2 Mon Sep 17 00:00:00 2001
From: mrT23
Date: Sun, 12 May 2024 15:52:59 +0300
Subject: [PATCH] Enhance error handling and logging in pr_code_suggestions
 with default scores and contextualized self_reflection

---
 pr_agent/tools/pr_code_suggestions.py | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/pr_agent/tools/pr_code_suggestions.py b/pr_agent/tools/pr_code_suggestions.py
index 6d095d8a7..1d828fa5a 100644
--- a/pr_agent/tools/pr_code_suggestions.py
+++ b/pr_agent/tools/pr_code_suggestions.py
@@ -186,8 +186,20 @@ async def _get_prediction(self, model: str, patches_diff: str) -> dict:
                 code_suggestions_feedback = response_reflect_yaml["code_suggestions"]
                 if len(code_suggestions_feedback) == len(data["code_suggestions"]):
                     for i, suggestion in enumerate(data["code_suggestions"]):
-                        suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
-                        suggestion["score_why"] = code_suggestions_feedback[i]["why"]
+                        try:
+                            suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
+                            suggestion["score_why"] = code_suggestions_feedback[i]["why"]
+                        except Exception as e:  #
+                            get_logger().error(f"Error processing suggestion score {i}",
+                                               artifact={"suggestion": suggestion,
+                                                         "code_suggestions_feedback": code_suggestions_feedback[i]})
+                            suggestion["score"] = 7
+                            suggestion["score_why"] = ""
+                else:
+                    get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
+                    for i, suggestion in enumerate(data["code_suggestions"]):
+                        suggestion["score"] = 7
+                        suggestion["score_why"] = ""
 
         return data
 
@@ -539,9 +551,10 @@ async def self_reflect_on_suggestions(self, suggestion_list: List, patches_diff:
             system_prompt_reflect = environment.from_string(get_settings().pr_code_suggestions_reflect_prompt.system).render(
                 variables)
             user_prompt_reflect = environment.from_string(get_settings().pr_code_suggestions_reflect_prompt.user).render(variables)
-            response_reflect, finish_reason_reflect = await self.ai_handler.chat_completion(model=model,
-                                                                                            system=system_prompt_reflect,
-                                                                                            user=user_prompt_reflect)
+            with get_logger().contextualize(command="self_reflect_on_suggestions"):
+                response_reflect, finish_reason_reflect = await self.ai_handler.chat_completion(model=model,
+                                                                                               system=system_prompt_reflect,
+                                                                                               user=user_prompt_reflect)
         except Exception as e:
             get_logger().info(f"Could not reflect on suggestions, error: {e}")
             return ""