Merge pull request #114 from Yiannis128/rate-limit
Better rate limiting and timeouts
Yiannis128 authored Mar 27, 2024
2 parents 79d4c8c + a960b36 commit 02f8b20
Showing 8 changed files with 83 additions and 80 deletions.
28 changes: 5 additions & 23 deletions config.json
@@ -3,6 +3,7 @@
"ai_custom": {},
"esbmc_path": "~/.local/bin/esbmc",
"allow_successful": true,
"verifier_timeout": 90,
"esbmc_params": [
"--interval-analysis",
"--goto-unwind",
@@ -18,7 +19,10 @@
"--context-bound",
"2"
],
"consecutive_prompt_delay": 20,
"requests": {
"max_tries": 5,
"timeout": 60
},
"temp_auto_clean": false,
"temp_file_dir": "./temp",
"loading_hints": true,
@@ -106,28 +110,6 @@
}
],
"initial": "Generate a correction for the source code provided. Show the code only. Do not reply with acknowledgements."
},
"optimize_code": {
"temperature": 1.0,
"system": [
{
"role": "System",
"content": "You are a code optimizer. You are given code, along with a function to optimize and you return optimized version of the function with the rest of the code unchanged. The optimized function should be smaller than the original function if possible and also execute faster than the original. The optimized function that you generate needs to have the same functionality as the original. From this point on, you can only reply in source code. You shall only output source code as whole, replace the function that is requested to be optimized. Reply OK if you understand."
},
{
"role": "AI",
"content": "OK"
}
],
"initial": "Optimize the function \"%s\". Reoply with the entire source file back.",
"esbmc_params": [
"--incremental-bmc",
"--no-bounds-check",
"--no-pointer-check",
"--no-div-by-zero-check",
"--max-k-step",
"10"
]
}
}
}
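A minimal sketch of how the two new keys might be consumed (this loader is illustrative, not the project's own; the real one lives in esbmc_ai/config.py, shown further down): the "requests" block groups the LLM client's retry and timeout settings, replacing the removed consecutive_prompt_delay, while "verifier_timeout" bounds each ESBMC run in seconds.

import json

# Illustrative reader for the new configuration keys.
with open("config.json") as file:
    cfg = json.load(file)

requests_max_tries = int(cfg["requests"]["max_tries"])  # e.g. 5 attempts per request
requests_timeout = float(cfg["requests"]["timeout"])    # e.g. 60 seconds per request
verifier_timeout = float(cfg["verifier_timeout"])       # e.g. 90 seconds per ESBMC run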
6 changes: 3 additions & 3 deletions esbmc_ai/__main__.py
@@ -5,7 +5,6 @@
import os
import re
import sys
from time import sleep

# Enables arrow key functionality for input(). Do not remove import.
import readline
@@ -272,6 +271,7 @@ def main() -> None:
exit_code, esbmc_output, esbmc_err_output = esbmc(
path=get_main_source_file_path(),
esbmc_params=config.esbmc_params,
timeout=config.verifier_timeout,
)
anim.stop()

@@ -312,6 +312,8 @@ def main() -> None:
chat_llm: BaseLanguageModel = config.ai_model.create_llm(
api_keys=config.api_keys,
temperature=config.chat_prompt_user_mode.temperature,
requests_max_tries=config.requests_max_tries,
requests_timeout=config.requests_timeout,
)

printv("Creating user chat")
@@ -404,9 +406,7 @@ def main() -> None:
anim.start(
"Message stack limit reached. Shortening message stack... Please Wait"
)
sleep(config.consecutive_prompt_delay)
chat.compress_message_stack()
sleep(config.consecutive_prompt_delay)
anim.stop()
continue
else:
17 changes: 15 additions & 2 deletions esbmc_ai/ai_models.py
@@ -3,13 +3,16 @@
from abc import abstractmethod
from typing import Any, Iterable, Union
from enum import Enum
from pydantic.v1.types import SecretStr
from typing_extensions import override

from langchain.prompts import PromptTemplate
from langchain.base_language import BaseLanguageModel

from langchain_openai import ChatOpenAI
from langchain_community.llms import HuggingFaceTextGenInference
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)

from langchain.prompts.chat import (
AIMessagePromptTemplate,
@@ -43,6 +46,8 @@ def create_llm(
self,
api_keys: APIKeyCollection,
temperature: float = 1.0,
requests_max_tries: int = 5,
requests_timeout: float = 60,
) -> BaseLanguageModel:
"""Initializes a large language model model with the provided parameters."""
raise NotImplementedError()
@@ -144,12 +149,16 @@ def create_llm(
self,
api_keys: APIKeyCollection,
temperature: float = 1.0,
requests_max_tries: int = 5,
requests_timeout: float = 60,
) -> BaseLanguageModel:
return ChatOpenAI(
model=self.name,
api_key=api_keys.openai,
api_key=SecretStr(api_keys.openai),
max_tokens=None,
temperature=temperature,
max_retries=requests_max_tries,
timeout=requests_timeout,
model_kwargs={},
)

@@ -205,6 +214,8 @@ def create_llm(
self,
api_keys: APIKeyCollection,
temperature: float = 1.0,
requests_max_tries: int = 5,
requests_timeout: float = 60,
) -> BaseLanguageModel:
return HuggingFaceTextGenInference(
client=None,
@@ -218,6 +229,8 @@
max_new_tokens=5000,
temperature=temperature,
stop_sequences=self.stop_sequences,
max_retries=requests_max_tries,
timeout=requests_timeout,
)

@override
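Both create_llm implementations now forward the retry and timeout settings to the underlying client (for OpenAI, ChatOpenAI's max_retries and timeout parameters). A minimal usage sketch, using names as they appear elsewhere in the repo:

from esbmc_ai import config
from esbmc_ai.ai_models import AIModels

# Hedged sketch: build an LLM whose rate-limit retries and per-request
# timeouts are handled inside the client rather than by caller-side sleeps.
llm = AIModels.GPT_3.value.create_llm(
    api_keys=config.api_keys,
    temperature=1.0,
    requests_max_tries=config.requests_max_tries,  # client-side retry count
    requests_timeout=config.requests_timeout,      # seconds per request
)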
7 changes: 2 additions & 5 deletions esbmc_ai/base_chat_interface.py
@@ -3,7 +3,6 @@
from abc import abstractmethod

from langchain.base_language import BaseLanguageModel
from langchain_community.callbacks import get_openai_callback
from langchain.schema import (
AIMessage,
BaseMessage,
@@ -12,11 +11,9 @@
PromptValue,
)

from openai import InternalServerError

from .config import ChatPromptSettings
from esbmc_ai.config import ChatPromptSettings
from .chat_response import ChatResponse, FinishReason
from .ai_models import AIModel, AIModelOpenAI
from .ai_models import AIModel


class BaseChatInterface(object):
16 changes: 3 additions & 13 deletions esbmc_ai/commands/fix_code_command.py
@@ -1,7 +1,6 @@
# Author: Yiannis Charalambous

from os import get_terminal_size
from time import sleep
from typing import Any, Tuple
from typing_extensions import override
from langchain.schema import AIMessage, HumanMessage
@@ -49,15 +48,6 @@ def execute(self, **kwargs: Any) -> Tuple[bool, str]:
source_code: str = kwargs["source_code"]
esbmc_output: str = kwargs["esbmc_output"]

wait_time: int = int(config.consecutive_prompt_delay)
# Create time left animation to show how much time left between API calls
# This is done by creating a list of all the numbers to be displayed and
# setting the animation delay to 1 second.
wait_anim = create_loading_widget(
anim_speed=1,
animation=[str(num) for num in range(wait_time, 0, -1)],
)

# Parse the esbmc output here and determine what "Scenario" to use.
scenario: str = self._resolve_scenario(esbmc_output)

@@ -71,6 +61,8 @@ def execute(self, **kwargs: Any) -> Tuple[bool, str]:
llm = config.ai_model.create_llm(
api_keys=config.api_keys,
temperature=config.chat_prompt_generator_mode.temperature,
requests_max_tries=config.requests_max_tries,
requests_timeout=config.requests_timeout,
)

solution_generator = SolutionGenerator(
@@ -116,6 +108,7 @@ def execute(self, **kwargs: Any) -> Tuple[bool, str]:
source_code=str(response),
esbmc_params=config.esbmc_params,
auto_clean=config.temp_auto_clean,
timeout=config.verifier_timeout,
)
self.anim.stop()

@@ -133,9 +126,6 @@ def execute(self, **kwargs: Any) -> Tuple[bool, str]:
print(f"Failure {idx+1}/{max_retries}: Retrying...")
# If final iteration no need to sleep.
if idx < max_retries - 1:
wait_anim.start(f"Sleeping due to rate limit:")
sleep(config.consecutive_prompt_delay)
wait_anim.stop()

# Inform solution generator chat about the ESBMC response.
if exit_code != 1:
6 changes: 2 additions & 4 deletions esbmc_ai/commands/help_command.py
@@ -28,8 +28,6 @@ def execute(self, **_: Optional[Any]) -> Optional[Any]:
print()
print("Useful AI Questions:")
print("1) How can I correct this code?")
print("2) Show me the corrected code.")
# TODO This needs to be uncommented as soon as ESBMC-AI can detect this query
# and trigger ESBMC to verify the code.
# print("3) Can you verify this corrected code with ESBMC again?")
print("2) What is the line with the error?")
print("3) What is the exact error?")
print()
47 changes: 25 additions & 22 deletions esbmc_ai/config.py
@@ -38,9 +38,12 @@

temp_auto_clean: bool = True
temp_file_dir: str = "."
consecutive_prompt_delay: float = 20.0
ai_model: AIModel = AIModels.GPT_3.value

requests_max_tries: int = 5
requests_timeout: float = 60
verifier_timeout: float = 60

loading_hints: bool = False
allow_successful: bool = False

@@ -285,22 +288,6 @@ def _load_ai_data(config: dict) -> None:
scenarios=fcm_scenarios,
)

global chat_prompt_optimize_code
chat_prompt_optimize_code = ChatPromptSettings(
system_messages=AIAgentConversation.load_from_config(
config["chat_modes"]["optimize_code"]["system"]
),
initial_prompt=config["chat_modes"]["optimize_code"]["initial"],
temperature=config["chat_modes"]["optimize_code"]["temperature"],
)

global esbmc_params_optimize_code
esbmc_params_optimize_code, _ = _load_config_value(
config["chat_modes"]["optimize_code"],
"esbmc_params",
esbmc_params_optimize_code,
)


def _load_config_value(
config_file: dict, name: str, default: object = None
@@ -357,11 +344,27 @@ def load_config(file_path: str) -> None:
esbmc_params,
)

global consecutive_prompt_delay
consecutive_prompt_delay = _load_config_real_number(
config_file,
"consecutive_prompt_delay",
consecutive_prompt_delay,
global requests_max_tries
requests_max_tries = int(
_load_config_real_number(
config_file=config_file["requests"],
name="max_tries",
default=requests_max_tries,
)
)

global requests_timeout
requests_timeout = _load_config_real_number(
config_file=config_file["requests"],
name="timeout",
default=requests_timeout,
)

global verifier_timeout
verifier_timeout = _load_config_real_number(
config_file=config_file,
name="verifier_timeout",
default=verifier_timeout,
)

global temp_auto_clean
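One thing to note: load_config indexes config_file["requests"] directly, so a config.json written before this change (with no "requests" block) raises a KeyError. A more tolerant read is possible; the fallback below is an assumption for illustration, not part of this commit:

# Hypothetical fallback: accept configs that predate the "requests" block
# by keeping the module-level defaults (5 tries, 60 seconds).
requests_cfg = config_file.get("requests", {})
requests_max_tries = int(requests_cfg.get("max_tries", requests_max_tries))
requests_timeout = float(requests_cfg.get("timeout", requests_timeout))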
36 changes: 28 additions & 8 deletions esbmc_ai/esbmc_util.py
@@ -1,35 +1,55 @@
# Author: Yiannis Charalambous

import os
from subprocess import Popen, PIPE, STDOUT
import sys
from subprocess import PIPE, STDOUT, run, CompletedProcess
from pathlib import Path
from typing import Optional

from . import config


def esbmc(path: str, esbmc_params: list):
def esbmc(path: str, esbmc_params: list, timeout: Optional[float] = None):
"""Exit code will be 0 if verification successful, 1 if verification
failed. And any other number for compilation error/general errors."""
# Build parameters
esbmc_cmd = [config.esbmc_path]
esbmc_cmd.extend(esbmc_params)
esbmc_cmd.append(path)

if "--timeout" in esbmc_cmd:
print(
'Do not add --timeout to ESBMC parameters, instead specify it in "verifier_timeout".'
)
sys.exit(1)

esbmc_cmd.extend(["--timeout", str(timeout)])

# Add slack time to process to allow verifier to timeout and end gracefully.
process_timeout: Optional[float] = timeout + 1 if timeout else None

# Run ESBMC and get output
process = Popen(esbmc_cmd, stdout=PIPE, stderr=STDOUT)
(output_bytes, err_bytes) = process.communicate()
# Return
exit_code = process.wait()
process: CompletedProcess = run(
esbmc_cmd,
stdout=PIPE,
stderr=STDOUT,
timeout=process_timeout,
)

output_bytes: bytes = process.stdout
err_bytes: bytes = process.stderr
output: str = str(output_bytes).replace("\\n", "\n")
err: str = str(err_bytes).replace("\\n", "\n")
return exit_code, output, err

return process.returncode, output, err


def esbmc_load_source_code(
file_path: str,
source_code: str,
esbmc_params: list = config.esbmc_params,
auto_clean: bool = config.temp_auto_clean,
timeout: Optional[float] = None,
):
source_code_path = Path(file_path)

@@ -48,7 +68,7 @@ def esbmc_load_source_code(
file.flush()

# Call ESBMC to temporary folder.
results = esbmc(file.name, esbmc_params)
results = esbmc(file.name, esbmc_params, timeout=timeout)

# Delete temp files and path
if auto_clean:
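A behavioural consequence of switching from Popen to run with a timeout: if ESBMC outlives the one second of slack past its own --timeout, run() raises subprocess.TimeoutExpired instead of returning. A caller that wants to treat a hard timeout as an ordinary failure could wrap the call; the handling below is a sketch, not repo code:

import subprocess

from esbmc_ai.esbmc_util import esbmc

# Hedged sketch: the +1 second slack normally lets ESBMC's internal
# --timeout fire first, but a wedged process still surfaces here.
try:
    exit_code, output, err = esbmc("main.c", ["--incremental-bmc"], timeout=90.0)
except subprocess.TimeoutExpired:
    exit_code, output, err = -1, "", "ESBMC exceeded the hard process timeout"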
