diff --git a/README.md b/README.md
index 370c867..5275046 100644
--- a/README.md
+++ b/README.md
@@ -215,6 +215,7 @@ Here are the available commands:
 - 📝 `/save` - Save the last code generated.
 - ✏️ `/edit` - Edit the last code generated.
 - ▶️ `/execute` - Execute the last code generated.
+- 🐛 `/debug` - Debug the last code generated.
 - 🔄 `/mode` - Change the mode of interpreter.
 - 🔄 `/model` - Change the model for interpreter.
 - 📦 `/install` - Install a package from npm or pip.
diff --git a/interpreter.py b/interpreter.py
index 0c4fc30..417fb1a 100755
--- a/interpreter.py
+++ b/interpreter.py
@@ -28,7 +28,7 @@ def main():
     parser.add_argument('--save_code', '-s', action='store_true', help='Save the generated code')
     parser.add_argument('--mode', '-md', choices=['code', 'script', 'command','vision'], help='Select the mode (`code` for generating code, `script` for generating shell scripts, `command` for generating single line commands) `vision` for generating text from images')
     parser.add_argument('--model', '-m', type=str, default='code-llama', help='Set the model for code generation. (Defaults to gpt-3.5-turbo)')
-    parser.add_argument('--version', '-v', action='version', version='%(prog)s 1.8')
+    parser.add_argument('--version', '-v', action='version', version='%(prog)s 1.8.1')
     parser.add_argument('--lang', '-l', type=str, default='python', help='Set the interpreter language. (Defaults to Python)')
     parser.add_argument('--display_code', '-dc', action='store_true', help='Display the code in output')
     args = parser.parse_args()
diff --git a/libs/interpreter_lib.py b/libs/interpreter_lib.py
index 0ac5f2b..3dbb3b0 100644
--- a/libs/interpreter_lib.py
+++ b/libs/interpreter_lib.py
@@ -27,7 +27,7 @@ class Interpreter:
     logger = None
     client = None
-    interpreter_version = "1.8"
+    interpreter_version = "1.8.1"
 
     def __init__(self, args):
         self.args = args
@@ -227,7 +227,7 @@ def execute_last_code(self,os_name,language='python'):
             self.logger.error(f"Error in processing command run code: {str(exception)}")
             raise
 
-    def generate_text(self,message, chat_history: list[tuple[str, str]], temperature=0.1, max_tokens=1024,config_values=None,image_file=None):
+    def generate_code(self,message, chat_history: list[tuple[str, str]], temperature=0.1, max_tokens=1024,config_values=None,image_file=None):
         self.logger.info(f"Generating code with args: message={message}, chat_history={chat_history}, temperature={temperature}, max_tokens={max_tokens}, config_values={config_values}, image_file={image_file}")
 
         # Use the values from the config file if they are provided
@@ -373,7 +373,10 @@ def interpreter_main(self):
         self.logger.info(f"Code Interpreter - v{self.interpreter_version}")
         os_platform = self.utility_manager.get_os_platform()
         os_name = os_platform[0]
+        generated_output = None
         extracted_code = None
+        code_output, code_error = None, None
+        extracted_file_name = None
 
         # Seting the mode.
         if self.SCRIPT_MODE:
@@ -457,7 +460,46 @@
                 elif os_platform[0].lower() == 'windows':
                     os.startfile(code_file)
                 continue
-            
+            
+            # DEBUG - Command section.
+            elif task.lower() in ['/debug','/d']:
+                
+                if not code_error:
+                    code_error = code_output
+                
+                if not code_error:
+                    display_markdown_message("Error: No error found in the code to fix.")
+                    continue
+                
+                debug_prompt = f"Fix the errors in the following {self.INTERPRETER_LANGUAGE} code.\nCode:\n'{extracted_code}'\nError:\n'{code_error}'\nGive me the output only as code with no other text or explanation, and add a comment in the code where you fixed the error.\n"
+                
+                # Start the LLM Request.
+                self.logger.info(f"Debug Prompt: {debug_prompt}")
+                generated_output = self.generate_code(debug_prompt, self.history, config_values=self.config_values,image_file=extracted_file_name)
+                
+                # Extract the code from the generated output.
+                self.logger.info(f"Generated output type {type(generated_output)}")
+                extracted_code = self.code_interpreter.extract_code(generated_output, start_sep, end_sep, skip_first_line,self.CODE_MODE)
+                
+                # Display the extracted code.
+                self.logger.info(f"Extracted code: {extracted_code[:50]}")
+                
+                if self.DISPLAY_CODE:
+                    display_code(extracted_code)
+                    self.logger.info("Code extracted successfully.")
+                
+                # Execute the fixed code and capture the new output and error.
+                code_output, code_error = self.execute_code(extracted_code, os_name)
+                
+                if code_output:
+                    self.logger.info(f"{self.INTERPRETER_LANGUAGE} code executed successfully.")
+                    display_code(code_output)
+                    self.logger.info(f"Output: {code_output[:100]}")
+                elif code_error:
+                    self.logger.info(f"{self.INTERPRETER_LANGUAGE} code executed with an error.")
+                    display_markdown_message(f"Error: {code_error}")
+                continue
+            
             # MODE - Command section.
             elif any(command in task.lower() for command in ['/mode ', '/md ']):
                 mode = task.split(' ')[1]
@@ -600,7 +642,7 @@
 
             # Start the LLM Request.
             self.logger.info(f"Prompt: {prompt}")
-            generated_output = self.generate_text(prompt, self.history, config_values=self.config_values,image_file=extracted_file_name)
+            generated_output = self.generate_code(prompt, self.history, config_values=self.config_values,image_file=extracted_file_name)
 
             # No extra processing for Vision mode.
             if self.INTERPRETER_MODE == 'vision':
diff --git a/libs/utility_manager.py b/libs/utility_manager.py
index 576d3e9..04f221f 100644
--- a/libs/utility_manager.py
+++ b/libs/utility_manager.py
@@ -183,6 +183,7 @@ def display_help(self):
             /execute - Execute the last code generated.\n\
             /install - Install a package from npm or pip.\n\
             /save - Save the last code generated.\n\
+            /debug - Debug the last code generated.\n\
             /mode - Change the mode of interpreter.\n\
             /model - Change the model for interpreter.\n\
             /language - Change the language of the interpreter.\n\
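
For reviewers, the new `/debug` handler is essentially a regenerate-and-rerun loop: it takes the last extracted code and its captured error (falling back to the last output when no error was recorded), folds both into a fix-it prompt, asks the model for corrected code, extracts it, and executes it again. Below is a minimal standalone sketch of that flow. The `Interpreter` attributes and methods it calls (`generate_code`, `execute_code`, `code_interpreter.extract_code`, `INTERPRETER_LANGUAGE`, `CODE_MODE`, `history`, `config_values`) come from the diff above; the `debug_last_code` wrapper and its separator parameters are hypothetical and shown only for illustration.

```python
# Hypothetical helper mirroring the /debug flow added in the diff above.
# `interpreter` is assumed to be an Interpreter instance from libs/interpreter_lib.py;
# start_sep/end_sep/skip_first_line are placeholders for the extraction settings
# used elsewhere in interpreter_main.
def debug_last_code(interpreter, extracted_code, code_output, code_error,
                    os_name, start_sep, end_sep, skip_first_line):
    # Fall back to the captured output when no explicit error was recorded.
    error = code_error or code_output
    if not error:
        return None, None, None  # Nothing to debug.

    # Fold the last code and its error into a fix-it prompt.
    debug_prompt = (
        f"Fix the errors in the following {interpreter.INTERPRETER_LANGUAGE} code.\n"
        f"Code:\n'{extracted_code}'\nError:\n'{error}'\n"
        "Give me the output only as code with no other text or explanation, "
        "and add a comment in the code where you fixed the error.\n"
    )

    # Ask the model for a corrected version, pull the code out of the reply,
    # and run it again, just as the /debug handler does.
    generated = interpreter.generate_code(
        debug_prompt, interpreter.history, config_values=interpreter.config_values)
    fixed_code = interpreter.code_interpreter.extract_code(
        generated, start_sep, end_sep, skip_first_line, interpreter.CODE_MODE)
    new_output, new_error = interpreter.execute_code(fixed_code, os_name)
    return fixed_code, new_output, new_error
```

The diff keeps this logic inline in `interpreter_main`, matching the style of the other command handlers; a helper like the sketch above is just one way the same flow could be isolated for testing.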