Add litellm library for LLMs and update function parameters
haseeb-heaven committed Jan 14, 2024
1 parent 37d395f commit fd76d20
Showing 1 changed file with 15 additions and 13 deletions.
28 changes: 15 additions & 13 deletions libs/interpreter_lib.py
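The heart of the change: instead of importing `completion` directly, the module now imports `litellm` itself and calls through the namespace, so every provider call is visibly routed through one library. A minimal sketch of the new calling pattern, assuming `litellm` is installed and an OpenAI key is configured; the model name, prompt, and parameter values below are illustrative, not taken from the commit:

```python
import litellm  # replaces: from litellm import completion

# Illustrative chat history; interpreter_lib.py builds this from the session.
messages = [{"role": "user", "content": "Write hello world in Python."}]

# The namespaced call style this commit switches to. Model name and
# parameter values are examples, not values from the commit.
response = litellm.completion(
    "gpt-3.5-turbo",
    messages=messages,
    temperature=0.1,
    max_tokens=1024,
)
print(response.choices[0].message.content)  # OpenAI-style response object
```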
@@ -14,9 +14,9 @@
 import os
 import subprocess
 import time
+import litellm # Main library for LLMs
 from typing import List
 from libs.code_interpreter import CodeInterpreter
-from litellm import completion
 from libs.history_manager import History
 from libs.logger import Logger
 from libs.markdown_code import display_code, display_markdown_message
@@ -192,7 +192,7 @@ def get_prompt(self,message: str, chat_history: List[dict]) -> str:
         ]
         return messages

-    def execute_last_code(self,os_name,language='python'):
+    def execute_last_code(self,os_name):
         try:
             code_file,code_snippet = self.utility_manager.get_code_history(self.INTERPRETER_LANGUAGE)

Expand Down Expand Up @@ -235,17 +235,17 @@ def generate_content(self,message, chat_history: list[tuple[str, str]], temperat
                 # Set the custom language model provider
                 custom_llm_provider = "openai"
                 self.logger.info(f"Custom API mode selected for OpenAI, api_base={api_base}")
-                response = completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens, api_base=api_base, custom_llm_provider=custom_llm_provider)
+                response = litellm.completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens, api_base=api_base, custom_llm_provider=custom_llm_provider)
             else:
                 self.logger.info(f"Default API mode selected for OpenAI.")
-                response = completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens)
+                response = litellm.completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens)
             self.logger.info("Response received from completion function.")

         # Check if the model is PALM-2
         elif 'palm' in self.INTERPRETER_MODEL:
             self.logger.info("Model is PALM-2.")
             self.INTERPRETER_MODEL = "palm/chat-bison"
-            response = completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature,max_tokens=max_tokens)
+            response = litellm.completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature,max_tokens=max_tokens)
             self.logger.info("Response received from completion function.")

         # Check if the model is Gemini Pro
@@ -281,17 +281,17 @@ def generate_content(self,message, chat_history: list[tuple[str, str]], temperat
             else:
                 self.logger.info("Model is Gemini Pro.")
                 self.INTERPRETER_MODEL = "gemini/gemini-pro"
-                response = completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature)
+                response = litellm.completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature)
                 self.logger.info("Response received from completion function.")

-        # Check if the model is GPT 3.5/4
+        # Check if the model is Local Model
         elif 'local' in self.INTERPRETER_MODEL:
             self.logger.info("Model is Local model")
             if api_base != 'None':
                 # Set the custom language model provider
                 custom_llm_provider = "openai"
                 self.logger.info(f"Custom API mode selected for Local Model, api_base={api_base}")
-                response = completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens, api_base=api_base, custom_llm_provider=custom_llm_provider)
+                response = litellm.completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens, api_base=api_base, custom_llm_provider=custom_llm_provider)
             else:
                 raise Exception("Exception api base not set for custom model")
             self.logger.info("Response received from completion function.")
@@ -304,7 +304,7 @@ def generate_content(self,message, chat_history: list[tuple[str, str]], temperat
                 self.INTERPRETER_MODEL = 'huggingface/' + self.INTERPRETER_MODEL

             self.logger.info(f"Model is from Hugging Face. {self.INTERPRETER_MODEL}")
-            response = completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature,max_tokens=max_tokens)
+            response = litellm.completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature,max_tokens=max_tokens)
             self.logger.info("Response received from completion function.")

         self.logger.info(f"Generated text {response}")
@@ -406,14 +406,16 @@ def interpreter_main(self):
display_markdown_message("Welcome to the **Interpreter**. I'm here to **assist** you with your everyday tasks. "
"\nPlease enter your task and I'll do my best to help you out.")

while True:
# Main System and Assistant loop.
running = True
while running:
try:
# Main input prompt - System and Assistant.
task = input("> ")

# Process the task.
# Command without arguments.
if task.lower() in ['/exit', '/quit']:
if task.lower() == '/exit':
break

# HELP - Command section.
@@ -422,7 +424,7 @@ def interpreter_main(self):
                     continue

                 # CLEAR - Command section.
-                elif task.lower() in ['/clear','/cls']:
+                elif task.lower() == '/clear':
                     self.utility_manager.clear_screen()
                     continue

@@ -477,7 +479,7 @@ def interpreter_main(self):

                 # EXECUTE - Command section.
                 elif task.lower() == '/execute':
-                    self.execute_last_code(os_name,self.INTERPRETER_LANGUAGE)
+                    self.execute_last_code(os_name)
                     continue

                 # SAVE - Command section.
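The `interpreter_main` edits replace the bare `while True:` with a named `running` flag and drop the `/quit` and `/cls` aliases. A minimal sketch of the resulting loop shape; the function name, the clear-screen stand-in, and the exception handling are illustrative, not the full method:

```python
def main_loop():
    """Sketch of the reworked REPL loop, not the full interpreter_main method."""
    running = True
    while running:
        try:
            # Main input prompt - System and Assistant.
            task = input("> ")

            # Command without arguments.
            if task.lower() == '/exit':
                break  # as committed, exit still leaves via break
            elif task.lower() == '/clear':
                print("\033c", end="")  # illustrative stand-in for utility_manager.clear_screen()
                continue

            # ... remaining /command handlers and the LLM call are elided ...
        except KeyboardInterrupt:
            running = False  # flag-based shutdown the named loop condition enables
```

As committed, `/exit` still leaves via `break`, so the `running` flag mainly gives later code a named way to stop the loop; anyone scripting the REPL now needs the canonical `/exit` and `/clear` spellings.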
