diff --git a/config.json b/config.json
index e3bdfd6..7b419b2 100644
--- a/config.json
+++ b/config.json
@@ -3,6 +3,7 @@
     "ai_custom": {},
     "esbmc_path": "~/.local/bin/esbmc",
     "allow_successful": true,
+    "verifier_timeout": 90,
     "esbmc_params": [
         "--interval-analysis",
         "--goto-unwind",
@@ -18,7 +19,10 @@
         "--context-bound",
         "2"
     ],
-    "consecutive_prompt_delay": 20,
+    "requests": {
+        "max_tries": 5,
+        "timeout": 60
+    },
     "temp_auto_clean": false,
     "temp_file_dir": "./temp",
     "loading_hints": true,
@@ -106,28 +110,6 @@
                 }
             ],
             "initial": "Generate a correction for the source code provided. Show the code only. Do not reply with acknowledgements."
-        },
-        "optimize_code": {
-            "temperature": 1.0,
-            "system": [
-                {
-                    "role": "System",
-                    "content": "You are a code optimizer. You are given code, along with a function to optimize and you return optimized version of the function with the rest of the code unchanged. The optimized function should be smaller than the original function if possible and also execute faster than the original. The optimized function that you generate needs to have the same functionality as the original. From this point on, you can only reply in source code. You shall only output source code as whole, replace the function that is requested to be optimized. Reply OK if you understand."
-                },
-                {
-                    "role": "AI",
-                    "content": "OK"
-                }
-            ],
-            "initial": "Optimize the function \"%s\". Reoply with the entire source file back.",
-            "esbmc_params": [
-                "--incremental-bmc",
-                "--no-bounds-check",
-                "--no-pointer-check",
-                "--no-div-by-zero-check",
-                "--max-k-step",
-                "10"
-            ]
         }
     }
 }
\ No newline at end of file
diff --git a/esbmc_ai/__main__.py b/esbmc_ai/__main__.py
index 71952b0..c99594f 100755
--- a/esbmc_ai/__main__.py
+++ b/esbmc_ai/__main__.py
@@ -5,7 +5,6 @@
 import os
 import re
 import sys
-from time import sleep
 
 # Enables arrow key functionality for input(). Do not remove import.
 import readline
@@ -272,6 +271,7 @@ def main() -> None:
         exit_code, esbmc_output, esbmc_err_output = esbmc(
             path=get_main_source_file_path(),
             esbmc_params=config.esbmc_params,
+            timeout=config.verifier_timeout,
         )
         anim.stop()
 
@@ -312,6 +312,8 @@ def main() -> None:
     chat_llm: BaseLanguageModel = config.ai_model.create_llm(
         api_keys=config.api_keys,
         temperature=config.chat_prompt_user_mode.temperature,
+        requests_max_tries=config.requests_max_tries,
+        requests_timeout=config.requests_timeout,
     )
 
     printv("Creating user chat")
@@ -404,9 +406,7 @@ def main() -> None:
                 anim.start(
                     "Message stack limit reached. Shortening message stack... Please Wait"
                 )
-                sleep(config.consecutive_prompt_delay)
                 chat.compress_message_stack()
-                sleep(config.consecutive_prompt_delay)
                 anim.stop()
                 continue
             else:
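Note: the new "requests" block and "verifier_timeout" key replace the removed "consecutive_prompt_delay" setting. Rather than sleeping a fixed 20 seconds between prompts, the LLM client is now configured to retry failed requests on its own. A minimal sketch of the retry semantics being delegated, assuming exponential backoff; the helper name retry_request is hypothetical and only illustrates the behaviour:

    import time
    from typing import Callable, TypeVar

    T = TypeVar("T")

    def retry_request(request: Callable[[], T], max_tries: int = 5) -> T:
        """Retry a failing request up to max_tries times, backing off between tries."""
        for attempt in range(max_tries):
            try:
                return request()
            except Exception:
                if attempt == max_tries - 1:
                    raise  # out of tries, surface the error
                time.sleep(2**attempt)  # hypothetical exponential backoff
        raise AssertionError("unreachable")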
Please Wait" ) - sleep(config.consecutive_prompt_delay) chat.compress_message_stack() - sleep(config.consecutive_prompt_delay) anim.stop() continue else: diff --git a/esbmc_ai/ai_models.py b/esbmc_ai/ai_models.py index e0f718e..1874bde 100644 --- a/esbmc_ai/ai_models.py +++ b/esbmc_ai/ai_models.py @@ -3,13 +3,16 @@ from abc import abstractmethod from typing import Any, Iterable, Union from enum import Enum +from pydantic.v1.types import SecretStr from typing_extensions import override from langchain.prompts import PromptTemplate from langchain.base_language import BaseLanguageModel from langchain_openai import ChatOpenAI -from langchain_community.llms import HuggingFaceTextGenInference +from langchain_community.llms.huggingface_text_gen_inference import ( + HuggingFaceTextGenInference, +) from langchain.prompts.chat import ( AIMessagePromptTemplate, @@ -43,6 +46,8 @@ def create_llm( self, api_keys: APIKeyCollection, temperature: float = 1.0, + requests_max_tries: int = 5, + requests_timeout: float = 60, ) -> BaseLanguageModel: """Initializes a large language model model with the provided parameters.""" raise NotImplementedError() @@ -144,12 +149,16 @@ def create_llm( self, api_keys: APIKeyCollection, temperature: float = 1.0, + requests_max_tries: int = 5, + requests_timeout: float = 60, ) -> BaseLanguageModel: return ChatOpenAI( model=self.name, - api_key=api_keys.openai, + api_key=SecretStr(api_keys.openai), max_tokens=None, temperature=temperature, + max_retries=requests_max_tries, + timeout=requests_timeout, model_kwargs={}, ) @@ -205,6 +214,8 @@ def create_llm( self, api_keys: APIKeyCollection, temperature: float = 1.0, + requests_max_tries: int = 5, + requests_timeout: float = 60, ) -> BaseLanguageModel: return HuggingFaceTextGenInference( client=None, @@ -218,6 +229,8 @@ def create_llm( max_new_tokens=5000, temperature=temperature, stop_sequences=self.stop_sequences, + max_retries=requests_max_tries, + timeout=requests_timeout, ) @override diff --git a/esbmc_ai/base_chat_interface.py b/esbmc_ai/base_chat_interface.py index 50158eb..4937be8 100644 --- a/esbmc_ai/base_chat_interface.py +++ b/esbmc_ai/base_chat_interface.py @@ -3,7 +3,6 @@ from abc import abstractmethod from langchain.base_language import BaseLanguageModel -from langchain_community.callbacks import get_openai_callback from langchain.schema import ( AIMessage, BaseMessage, @@ -12,11 +11,9 @@ PromptValue, ) -from openai import InternalServerError - -from .config import ChatPromptSettings +from esbmc_ai.config import ChatPromptSettings from .chat_response import ChatResponse, FinishReason -from .ai_models import AIModel, AIModelOpenAI +from .ai_models import AIModel class BaseChatInterface(object): diff --git a/esbmc_ai/commands/fix_code_command.py b/esbmc_ai/commands/fix_code_command.py index 44f24fb..4f60ef4 100644 --- a/esbmc_ai/commands/fix_code_command.py +++ b/esbmc_ai/commands/fix_code_command.py @@ -1,7 +1,6 @@ # Author: Yiannis Charalambous from os import get_terminal_size -from time import sleep from typing import Any, Tuple from typing_extensions import override from langchain.schema import AIMessage, HumanMessage @@ -49,15 +48,6 @@ def execute(self, **kwargs: Any) -> Tuple[bool, str]: source_code: str = kwargs["source_code"] esbmc_output: str = kwargs["esbmc_output"] - wait_time: int = int(config.consecutive_prompt_delay) - # Create time left animation to show how much time left between API calls - # This is done by creating a list of all the numbers to be displayed and - # setting the animation delay to 1 
diff --git a/esbmc_ai/commands/fix_code_command.py b/esbmc_ai/commands/fix_code_command.py
index 44f24fb..4f60ef4 100644
--- a/esbmc_ai/commands/fix_code_command.py
+++ b/esbmc_ai/commands/fix_code_command.py
@@ -1,7 +1,6 @@
 # Author: Yiannis Charalambous
 
 from os import get_terminal_size
-from time import sleep
 from typing import Any, Tuple
 from typing_extensions import override
 from langchain.schema import AIMessage, HumanMessage
@@ -49,15 +48,6 @@ def execute(self, **kwargs: Any) -> Tuple[bool, str]:
         source_code: str = kwargs["source_code"]
         esbmc_output: str = kwargs["esbmc_output"]
 
-        wait_time: int = int(config.consecutive_prompt_delay)
-        # Create time left animation to show how much time left between API calls
-        # This is done by creating a list of all the numbers to be displayed and
-        # setting the animation delay to 1 second.
-        wait_anim = create_loading_widget(
-            anim_speed=1,
-            animation=[str(num) for num in range(wait_time, 0, -1)],
-        )
-
         # Parse the esbmc output here and determine what "Scenario" to use.
         scenario: str = self._resolve_scenario(esbmc_output)
 
@@ -71,6 +61,8 @@
         llm = config.ai_model.create_llm(
             api_keys=config.api_keys,
             temperature=config.chat_prompt_generator_mode.temperature,
+            requests_max_tries=config.requests_max_tries,
+            requests_timeout=config.requests_timeout,
         )
 
         solution_generator = SolutionGenerator(
@@ -116,6 +108,7 @@
                 source_code=str(response),
                 esbmc_params=config.esbmc_params,
                 auto_clean=config.temp_auto_clean,
+                timeout=config.verifier_timeout,
             )
             self.anim.stop()
 
@@ -133,9 +126,6 @@
             print(f"Failure {idx+1}/{max_retries}: Retrying...")
             # If final iteration no need to sleep.
             if idx < max_retries - 1:
-                wait_anim.start(f"Sleeping due to rate limit:")
-                sleep(config.consecutive_prompt_delay)
-                wait_anim.stop()
 
                 # Inform solution generator chat about the ESBMC response.
                 if exit_code != 1:
diff --git a/esbmc_ai/commands/help_command.py b/esbmc_ai/commands/help_command.py
index 6fbf6ad..39d5ee2 100644
--- a/esbmc_ai/commands/help_command.py
+++ b/esbmc_ai/commands/help_command.py
@@ -28,8 +28,6 @@ def execute(self, **_: Optional[Any]) -> Optional[Any]:
         print()
         print("Useful AI Questions:")
         print("1) How can I correct this code?")
-        print("2) Show me the corrected code.")
-        # TODO This needs to be uncommented as soon as ESBMC-AI can detect this query
-        # and trigger ESBMC to verify the code.
-        # print("3) Can you verify this corrected code with ESBMC again?")
+        print("2) What is the line with the error?")
+        print("3) What is the exact error?")
         print()
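Note: the config.py diff below loads the new keys. The nested "requests" table is indexed first and handed to the numeric loader, so the same helper reads both top-level and nested values. Roughly what the three loader calls compute, assuming config_file is the parsed config.json dict; the literal mirrors the defaults shipped in this diff:

    config_file = {
        "verifier_timeout": 90,
        "requests": {"max_tries": 5, "timeout": 60},
    }

    requests_table: dict = config_file["requests"]  # nested lookup table
    requests_max_tries = int(requests_table["max_tries"])
    requests_timeout = float(requests_table["timeout"])
    verifier_timeout = float(config_file["verifier_timeout"])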
diff --git a/esbmc_ai/config.py b/esbmc_ai/config.py
index 30f5074..b8e8586 100644
--- a/esbmc_ai/config.py
+++ b/esbmc_ai/config.py
@@ -38,9 +38,12 @@
 temp_auto_clean: bool = True
 temp_file_dir: str = "."
 
-consecutive_prompt_delay: float = 20.0
 ai_model: AIModel = AIModels.GPT_3.value
 
+requests_max_tries: int = 5
+requests_timeout: float = 60
+verifier_timeout: float = 60
+
 loading_hints: bool = False
 allow_successful: bool = False
 
@@ -285,22 +288,6 @@ def _load_ai_data(config: dict) -> None:
         scenarios=fcm_scenarios,
     )
 
-    global chat_prompt_optimize_code
-    chat_prompt_optimize_code = ChatPromptSettings(
-        system_messages=AIAgentConversation.load_from_config(
-            config["chat_modes"]["optimize_code"]["system"]
-        ),
-        initial_prompt=config["chat_modes"]["optimize_code"]["initial"],
-        temperature=config["chat_modes"]["optimize_code"]["temperature"],
-    )
-
-    global esbmc_params_optimize_code
-    esbmc_params_optimize_code, _ = _load_config_value(
-        config["chat_modes"]["optimize_code"],
-        "esbmc_params",
-        esbmc_params_optimize_code,
-    )
-
 
 def _load_config_value(
     config_file: dict, name: str, default: object = None
@@ -357,11 +344,27 @@ def load_config(file_path: str) -> None:
         esbmc_params,
     )
 
-    global consecutive_prompt_delay
-    consecutive_prompt_delay = _load_config_real_number(
-        config_file,
-        "consecutive_prompt_delay",
-        consecutive_prompt_delay,
+    global requests_max_tries
+    requests_max_tries = int(
+        _load_config_real_number(
+            config_file=config_file["requests"],
+            name="max_tries",
+            default=requests_max_tries,
+        )
+    )
+
+    global requests_timeout
+    requests_timeout = _load_config_real_number(
+        config_file=config_file["requests"],
+        name="timeout",
+        default=requests_timeout,
+    )
+
+    global verifier_timeout
+    verifier_timeout = _load_config_real_number(
+        config_file=config_file,
+        name="verifier_timeout",
+        default=verifier_timeout,
     )
 
     global temp_auto_clean
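Note: verifier_timeout is enforced at two levels, as the esbmc_util.py diff below shows: it is passed to ESBMC through the verifier's own --timeout flag, and the Python subprocess gets the same limit plus one second of slack as a backstop. Illustratively, with the values from this config.json (parameters abridged):

    esbmc_cmd = ["~/.local/bin/esbmc"]          # config.esbmc_path
    esbmc_cmd.extend(["--interval-analysis"])   # config.esbmc_params, abridged
    esbmc_cmd.append("main.c")                  # example file under verification
    esbmc_cmd.extend(["--timeout", "90"])       # config.verifier_timeout
    process_timeout = 90 + 1                    # subprocess backstop, 1s slack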
diff --git a/esbmc_ai/esbmc_util.py b/esbmc_ai/esbmc_util.py
index ddd66aa..d589a84 100644
--- a/esbmc_ai/esbmc_util.py
+++ b/esbmc_ai/esbmc_util.py
@@ -1,13 +1,15 @@
 # Author: Yiannis Charalambous
 
 import os
-from subprocess import Popen, PIPE, STDOUT
+import sys
+from subprocess import PIPE, STDOUT, run, CompletedProcess
 from pathlib import Path
+from typing import Optional
 
 from . import config
 
 
-def esbmc(path: str, esbmc_params: list):
+def esbmc(path: str, esbmc_params: list, timeout: Optional[float] = None):
     """Exit code will be 0 if verification successful, 1 if verification
     failed. And any other number for compilation error/general errors."""
     # Build parameters
@@ -15,14 +17,32 @@
     esbmc_cmd.extend(esbmc_params)
     esbmc_cmd.append(path)
 
+    if "--timeout" in esbmc_cmd:
+        print(
+            'Do not add --timeout to ESBMC parameters, instead specify it in "verifier_timeout".'
+        )
+        sys.exit(1)
+
+    if timeout is not None:
+        esbmc_cmd.extend(["--timeout", str(timeout)])
+
+    # Add slack time to process to allow verifier to timeout and end gracefully.
+    process_timeout: Optional[float] = timeout + 1 if timeout else None
+
     # Run ESBMC and get output
-    process = Popen(esbmc_cmd, stdout=PIPE, stderr=STDOUT)
-    (output_bytes, err_bytes) = process.communicate()
-    # Return
-    exit_code = process.wait()
+    process: CompletedProcess = run(
+        esbmc_cmd,
+        stdout=PIPE,
+        stderr=STDOUT,
+        timeout=process_timeout,
+    )
+
+    output_bytes: bytes = process.stdout
+    err_bytes: Optional[bytes] = process.stderr  # None here: stderr is merged into stdout above.
 
     output: str = str(output_bytes).replace("\\n", "\n")
     err: str = str(err_bytes).replace("\\n", "\n")
-    return exit_code, output, err
+
+    return process.returncode, output, err
 
 
 def esbmc_load_source_code(
@@ -30,6 +49,7 @@
     file_path: str,
     source_code: str,
     esbmc_params: list = config.esbmc_params,
     auto_clean: bool = config.temp_auto_clean,
+    timeout: Optional[float] = None,
 ):
     source_code_path = Path(file_path)
@@ -48,7 +68,7 @@
         file.flush()
 
         # Call ESBMC to temporary folder.
-        results = esbmc(file.name, esbmc_params)
+        results = esbmc(file.name, esbmc_params, timeout=timeout)
 
         # Delete temp files and path
         if auto_clean:
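Note: run(..., timeout=process_timeout) raises subprocess.TimeoutExpired if ESBMC overruns even the one-second slack, so callers crash rather than receive a result. A usage sketch for callers that prefer a soft failure; the verify wrapper and its sentinel exit code are hypothetical, not part of this diff:

    from subprocess import TimeoutExpired

    from esbmc_ai.esbmc_util import esbmc

    def verify(path: str, esbmc_params: list, timeout: float = 90):
        """Run ESBMC, mapping a hard subprocess timeout to a sentinel result."""
        try:
            return esbmc(path, esbmc_params, timeout=timeout)
        except TimeoutExpired:
            # ESBMC ignored --timeout and was killed by the subprocess backstop.
            return -1, "ESBMC timed out", ""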