diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..5251d0f
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+  - package-ecosystem: "pip" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/Sync-Github.yml b/.github/workflows/Sync-Github.yml
index 2e7632f..d144866 100644
--- a/.github/workflows/Sync-Github.yml
+++ b/.github/workflows/Sync-Github.yml
@@ -1,15 +1,16 @@
 name: GitlabSync
 
 on:
-  - push
-  - delete
+  push:
+    branches:
+      - main
 
 jobs:
   sync:
     runs-on: ubuntu-latest
     name: Git Repo Sync
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - uses: wangchucheng/git-repo-sync@v0.1.0
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index 54532ee..c826230 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -14,7 +14,7 @@ jobs:
 
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       - name: Install SSH Client
         run: sudo apt-get update && sudo apt-get install -y openssh-client
diff --git a/.github/workflows/tests-python.yml b/.github/workflows/tests-python.yml
index 21f364f..247dd82 100644
--- a/.github/workflows/tests-python.yml
+++ b/.github/workflows/tests-python.yml
@@ -1,15 +1,19 @@
 name: Test Geppetto
 
-on: [ push, pull_request]
+on:
+  push:
+    branches:
+      - main
+      - develop
 
 jobs:
   Test-Python3:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'
diff --git a/README.md b/README.md
index 20e3b3e..8bc813e 100644
--- a/README.md
+++ b/README.md
@@ -7,16 +7,19 @@
 Geppetto Logo
 <br><br>
 
-Geppetto is a Slack bot for teams to easily interact with ChatGPT. It integrates with OpenAI's ChatGPT-4 and DALL-E-3 models. This project is brought to you by [DeepTechia](https://deeptechia.io/), where the future of technology meets today’s business needs.
+Geppetto is a Slack bot that lets teams interact with multiple AI models, including OpenAI's ChatGPT-4 and DALL-E-3 and Google's Gemini, so AI-driven interactions can be tailored to each team's needs. This project is brought to you by [DeepTechia](https://deeptechia.io/), where the future of technology meets today’s business needs.
 
 ## Features
 
-1. **Interaction with ChatGPT-4:**
-   - You can send direct messages to the application and receive responses from ChatGPT-4.
-   - Each message generates a conversation thread, and the application uses the message history to formulate coherent responses.
+1. **Flexible AI Model Integration and System Management:**
+   - Users can switch between ChatGPT-4-turbo and Gemini to suit their specific interaction needs. ChatGPT-4-turbo is the default model.
+   - You can send direct messages to the application and receive responses from Geppetto. Each message generates a conversation thread, and the application uses the message history to formulate coherent responses.
+   - The newly introduced LLM controller component lets the user manage multiple AI models.
+   - Simplified installation and management, facilitated by Docker deployment.
+
+2. **Advanced Image Generation with DALL-E-3:**
+   - Leverage DALL-E-3 to generate creative and contextually relevant images directly within Slack conversations.
 
-2. **Image Generation with DALL-E-3:**
-   - The application uses DALL-E-3 to generate an image based on the message.
 
 ![Geppetto](/assets/Geppetto.gif)
@@ -72,6 +75,8 @@ Before running the application, copy the `.configuration/.env.example` file into
 - `SIGNING_SECRET`: Your Signing secret to verify Slack requests (from your Slack App Credentials).
 - `DALLE_MODEL`: The OpenAI DALL-E-3 model.
 - `CHATGPT_MODEL`: The OpenAI ChatGPT-4 model.
+- `GEMINI_MODEL`: The Google Gemini model.
+- `GOOGLE_API_KEY`: The Google Gemini API key.
 
 ## Deployment
@@ -89,11 +94,11 @@ Follow these steps to deploy Geppetto:
 
 Enjoy interacting with ChatGPT-4 and DALL-E-3 on Slack!
 
 ## Docker
-To run geppetto in a docker container, when you have Docker & Docker compose installed:
-1. Move docker-compose.example.yml to docker-compose.yml with customizing where your config folder resides
-2. Change the config values in config/.env
-3. Run docker compose build
-4. Run docker compose up -d
+To run Geppetto in a Docker container, once you have Docker and Docker Compose installed:
+1. Move `docker-compose.example.yml` to `docker-compose.yml`, specifying where your config folder resides.
+2. Change the config values in `config/.env`.
+3. Run `docker compose build`.
+4. Run `docker compose up -d`.
 
 ## Tests
@@ -104,7 +109,7 @@
 or `python -m unittest -v` for a verbose more specific output
 
 ## About DeepTechia
 
-We are DeepTechia, where the future of technology meets today’s business needs. As pioneers in the digital realm, we’ve made it our mission to bridge the gap between innovation and practicality, ensuring that businesses not only survive but thrive in an ever-evolving technological landscape.
+We are [DeepTechia](https://deeptechia.io/), where the future of technology meets today’s business needs. As pioneers in the digital realm, we’ve made it our mission to bridge the gap between innovation and practicality, ensuring that businesses not only survive but thrive in an ever-evolving technological landscape.
 
 Born from a passion for cutting-edge technology and a vision for a digitally integrated future, DeepTechia was established to be more than just a tech consultancy. We are visionaries, strategists, and implementers, dedicated to pushing the boundaries of what’s possible while ensuring real-world applicability.
diff --git a/config/.env.example b/config/.env.example
index 9db802b..9c8479f 100644
--- a/config/.env.example
+++ b/config/.env.example
@@ -1,6 +1,8 @@
 SLACK_BOT_TOKEN = "YOUR_TOKEN"
 SLACK_APP_TOKEN = "YOUR_TOKEN"
 OPENAI_API_KEY = "YOUR_TOKEN"
-CHATGPT_MODEL = "gpt-4"
+CHATGPT_MODEL = "gpt-4-turbo"
 DALLE_MODEL = "dall-e-3"
 SIGNING_SECRET = "YOUR_SECRET"
+GOOGLE_API_KEY = "YOUR_TOKEN"
+GEMINI_MODEL = "gemini-pro"
diff --git a/config/allowed-slack-ids.json b/config/allowed-slack-ids.json
index fa41012..0eb741b 100644
--- a/config/allowed-slack-ids.json
+++ b/config/allowed-slack-ids.json
@@ -1,4 +1,5 @@
 {
+    "*": "*",
     "User A": "#MemberIDUserA",
     "User B": "#MemberIDUSerB"
 }
diff --git a/config/default_responses.json b/config/default_responses.json
index 481d0ed..f6f7bd9 100644
--- a/config/default_responses.json
+++ b/config/default_responses.json
@@ -2,8 +2,7 @@
   "features": {
     "personality": "You are Geppetto, a general intelligence bot created by DeepTechia."
   },
-  "dalle": { "preparing_image": "Preparing image.." },
   "user": {
     "permission_denied": "The requesting user does not belong to the list of allowed users. Request permission to use the app"
   }
-}
+}
\ No newline at end of file
diff --git a/geppetto/exceptions.py b/geppetto/exceptions.py
new file mode 100644
index 0000000..eefd108
--- /dev/null
+++ b/geppetto/exceptions.py
@@ -0,0 +1,10 @@
+# Geppetto Exceptions
+
+
+class InvalidThreadFormatError(KeyError):
+    """Invalid thread format.
+
+    Raise if the submitted thread format doesn't have the expected layout.
+    Since the UIs and the underlying LLM engines must meet an interface,
+    some validations have to be undertaken to assure key fields.
+    """
\ No newline at end of file
diff --git a/geppetto/gemini_handler.py b/geppetto/gemini_handler.py
new file mode 100644
index 0000000..d548388
--- /dev/null
+++ b/geppetto/gemini_handler.py
@@ -0,0 +1,61 @@
+from urllib.request import urlopen
+import logging
+
+from .exceptions import InvalidThreadFormatError
+from .llm_api_handler import LLMHandler
+from dotenv import load_dotenv
+from typing import List, Dict
+import os
+import textwrap
+import google.generativeai as genai
+from IPython.display import display
+from IPython.display import Markdown
+
+load_dotenv(os.path.join("config", ".env"))
+
+GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+GEMINI_MODEL = os.getenv("GEMINI_MODEL", "gemini-pro")
+MSG_FIELD = "parts"
+MSG_INPUT_FIELD = "content"
+
+
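+# Reflow Gemini's plain text for Slack: swap "•" bullets for markdown "*"
+# bullets and block-quote the response (an IPython Markdown object whose
+# .data attribute holds the final string).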
+ """ \ No newline at end of file diff --git a/geppetto/gemini_handler.py b/geppetto/gemini_handler.py new file mode 100644 index 0000000..d548388 --- /dev/null +++ b/geppetto/gemini_handler.py @@ -0,0 +1,61 @@ +from urllib.request import urlopen +import logging + +from .exceptions import InvalidThreadFormatError +from .llm_api_handler import LLMHandler +from dotenv import load_dotenv +from typing import List, Dict +import os +import textwrap +import google.generativeai as genai +from IPython.display import display +from IPython.display import Markdown + +load_dotenv(os.path.join("config", ".env")) + +GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") +GEMINI_MODEL=os.getenv("GEMINI_MODEL", "gemini-pro") +MSG_FIELD = "parts" +MSG_INPUT_FIELD = "content" + +def to_markdown(text): + text = text.replace('•', ' *') + return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True)) + +class GeminiHandler(LLMHandler): + + def __init__( + self, + personality, + ): + super().__init__( + 'Gemini', + GEMINI_MODEL, + genai.GenerativeModel(GEMINI_MODEL), + ) + self.personality = personality + self.system_role = "system" + self.assistant_role = "model" + self.user_role = "user" + genai.configure(api_key=GOOGLE_API_KEY) + + def llm_generate_content(self, user_prompt, status_callback=None, *status_callback_args): + logging.info("Sending msg to gemini: %s" % user_prompt) + if len(user_prompt) >= 2 and user_prompt[0].get('role') == 'user' and user_prompt[1].get('role') == 'user': + merged_prompt = { + 'role': 'user', + 'parts': [msg['parts'][0] for msg in user_prompt[:2]] + } + user_prompt = [merged_prompt] + user_prompt[2:] + response= self.client.generate_content(user_prompt) + markdown_response = to_markdown(response.text) + return str(markdown_response.data) + + def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_tag: str): + prompt = super().get_prompt_from_thread(thread, assistant_tag, user_tag) + for msg in prompt: + if MSG_INPUT_FIELD in msg: + msg[MSG_FIELD] = [msg.pop(MSG_INPUT_FIELD)] + else: + raise InvalidThreadFormatError("The input thread doesn't have the field %s" % MSG_INPUT_FIELD) + return prompt diff --git a/geppetto/llm_api_handler.py b/geppetto/llm_api_handler.py new file mode 100644 index 0000000..b9b380b --- /dev/null +++ b/geppetto/llm_api_handler.py @@ -0,0 +1,30 @@ +from abc import ABC, abstractmethod +from typing import List, Dict, Callable +from .exceptions import InvalidThreadFormatError + +ROLE_FIELD = "role" + +class LLMHandler(ABC): + def __init__(self, name, model, client): + self.name = name + self.model = model + self.client = client + + def get_info(self): + return f"Name: {self.name} - Model: {self.model}" + + @abstractmethod + def llm_generate_content(self, prompt: str, callback: Callable, *callback_args): + pass + + def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_tag: str): + prompt = [] + for msg in thread: + formatted_msg = dict(msg) + if ROLE_FIELD in formatted_msg: + formatted_msg[ROLE_FIELD] = formatted_msg[ROLE_FIELD].replace(assistant_tag, self.assistant_role) + formatted_msg[ROLE_FIELD] = formatted_msg[ROLE_FIELD].replace(user_tag, self.user_role) + prompt.append(formatted_msg) + else: + raise InvalidThreadFormatError("The input thread doesn't have the field %s" % ROLE_FIELD) + return prompt diff --git a/geppetto/llm_controller.py b/geppetto/llm_controller.py new file mode 100644 index 0000000..e495a4c --- /dev/null +++ b/geppetto/llm_controller.py @@ -0,0 +1,36 @@ +from typing import List, Type, 
+        if len(user_prompt) >= 2 and user_prompt[0].get('role') == 'user' and user_prompt[1].get('role') == 'user':
+            merged_prompt = {
+                'role': 'user',
+                'parts': [msg['parts'][0] for msg in user_prompt[:2]]
+            }
+            user_prompt = [merged_prompt] + user_prompt[2:]
+        response = self.client.generate_content(user_prompt)
+        markdown_response = to_markdown(response.text)
+        return str(markdown_response.data)
+
+    def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_tag: str):
+        prompt = super().get_prompt_from_thread(thread, assistant_tag, user_tag)
+        for msg in prompt:
+            if MSG_INPUT_FIELD in msg:
+                msg[MSG_FIELD] = [msg.pop(MSG_INPUT_FIELD)]
+            else:
+                raise InvalidThreadFormatError("The input thread doesn't have the field %s" % MSG_INPUT_FIELD)
+        return prompt
diff --git a/geppetto/llm_api_handler.py b/geppetto/llm_api_handler.py
new file mode 100644
index 0000000..b9b380b
--- /dev/null
+++ b/geppetto/llm_api_handler.py
@@ -0,0 +1,30 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Callable
+from .exceptions import InvalidThreadFormatError
+
+ROLE_FIELD = "role"
+
+
+class LLMHandler(ABC):
+
+    def __init__(self, name, model, client):
+        self.name = name
+        self.model = model
+        self.client = client
+
+    def get_info(self):
+        return f"Name: {self.name} - Model: {self.model}"
+
+    @abstractmethod
+    def llm_generate_content(self, prompt: str, callback: Callable, *callback_args):
+        pass
+
+    def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_tag: str):
+        prompt = []
+        for msg in thread:
+            formatted_msg = dict(msg)
+            if ROLE_FIELD in formatted_msg:
+                formatted_msg[ROLE_FIELD] = formatted_msg[ROLE_FIELD].replace(assistant_tag, self.assistant_role)
+                formatted_msg[ROLE_FIELD] = formatted_msg[ROLE_FIELD].replace(user_tag, self.user_role)
+                prompt.append(formatted_msg)
+            else:
+                raise InvalidThreadFormatError("The input thread doesn't have the field %s" % ROLE_FIELD)
+        return prompt
diff --git a/geppetto/llm_controller.py b/geppetto/llm_controller.py
new file mode 100644
index 0000000..e495a4c
--- /dev/null
+++ b/geppetto/llm_controller.py
@@ -0,0 +1,36 @@
+from typing import List, Type, TypedDict, Dict
+from .llm_api_handler import LLMHandler
+
+
+class LLMCfgRec(TypedDict):
+    name: str
+    handler: Type[LLMHandler]
+    handler_args: Dict
+
+
+LLMCfgs = List[LLMCfgRec]
+
+
+class LLMController:
+
+    def __init__(self, llm_cfgs: LLMCfgs):
+        self.llm_cfgs = llm_cfgs
+        self.handlers = {}
+
+    def init_controller(self):
+        for llm in self.llm_cfgs:
+            name = llm['name']
+            self.handlers[name] = self.get_handler(name)
+
+    def list_llms(self):
+        return [x['name'] for x in self.llm_cfgs]
+
+    def get_llm_cfg(self, name):
+        for llm in self.llm_cfgs:
+            if llm['name'] == name:
+                return llm
+        raise ValueError("LLM configuration not found for name: %s" % name)
+
+    def get_handler(self, name):
+        llm_cfg = self.get_llm_cfg(name)
+        return llm_cfg['handler'](**llm_cfg['handler_args'])
diff --git a/geppetto/main.py b/geppetto/main.py
index 05e9195..986ecaf 100644
--- a/geppetto/main.py
+++ b/geppetto/main.py
@@ -1,33 +1,59 @@
 import os
 import logging
 from dotenv import load_dotenv
+
+from .llm_controller import LLMController
 from .slack_handler import SlackHandler
+from .openai_handler import OpenAIHandler
+from .gemini_handler import GeminiHandler
 from slack_bolt.adapter.socket_mode import SocketModeHandler
 from .utils import load_json
 
 load_dotenv(os.path.join("config", ".env"))
 
-OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-DALLE_MODEL = os.getenv("DALLE_MODEL")
-CHATGPT_MODEL = os.getenv("CHATGPT_MODEL")
+
 SLACK_BOT_TOKEN = os.getenv("SLACK_BOT_TOKEN_TEST")
 SLACK_APP_TOKEN = os.getenv("SLACK_APP_TOKEN_TEST")
 SIGNING_SECRET = os.getenv("SIGNING_SECRET_TEST")
 
+DEFAULT_RESPONSES = load_json("default_responses.json")
+
 # Initialize logging
 # TODO: log to a file
 logging.basicConfig(level=logging.INFO)
 
 
+def initialized_llm_controller():
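+    # Every available backend is registered here; the first entry acts as the
+    # default when a message carries no "llm_" tag (see select_llm_from_msg).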
functionalities", - ] + super().__init__( + 'OpenAI', + CHATGPT_MODEL, + OpenAI(api_key=OPENAI_API_KEY) ) + self.dalle_model = DALLE_MODEL + self.personality = personality + self.system_role = "system" + self.assistant_role = "assistant" + self.user_role = "user" - def download_image(self, url): + @staticmethod + def download_image(url): img = Image.open(urlopen(url=url)) img_byte_arr = BytesIO() img.save(img_byte_arr, format="PNG") img_byte_arr = img_byte_arr.getvalue() return img_byte_arr + @staticmethod + def get_functionalities(): + return json.dumps( + [ + "Generate an image from text", + "Get app functionalities", + ] + ) + def generate_image(self, prompt, size="1024x1024"): logging.info("Generating image: %s with size: %s" % (prompt, size)) try: @@ -44,13 +69,13 @@ def generate_image(self, prompt, size="1024x1024"): except Exception as e: logging.error(f"Error generating image: {e}") - def send_message(self, user_prompt, callback, *callback_args): - logging.info("Sending msg to chatgpt: %s" % (user_prompt)) + def llm_generate_content(self, user_prompt, status_callback=None, *status_callback_args): + logging.info("Sending msg to chatgpt: %s" % user_prompt) tools = [ { "type": "function", "function": { - "name": "generate_image", + "name": OPENAI_IMG_FUNCTION, "description": "Generate an image from text", "parameters": { "type": "object", @@ -80,13 +105,13 @@ def send_message(self, user_prompt, callback, *callback_args): # Initial conversation message messages = [ { - "role": "system", - "content": self.bot_default_responses["features"]["personality"], + "role": self.system_role, + "content": self.personality, }, *user_prompt, ] response = self.client.chat.completions.create( - model=self.chatgpt_model, + model=self.model, messages=messages, tools=tools, tool_choice="auto", @@ -95,15 +120,16 @@ def send_message(self, user_prompt, callback, *callback_args): tool_calls = response.choices[0].message.tool_calls if tool_calls: available_functions = { - "generate_image": self.generate_image, + OPENAI_IMG_FUNCTION: self.generate_image, "get_functionalities": self.get_functionalities, } tool_call = tool_calls[0] function_name = tool_call.function.name function_args = json.loads(tool_call.function.arguments) function = available_functions[function_name] - if function_name == "generate_image": - callback(*callback_args) + if function_name == OPENAI_IMG_FUNCTION and status_callback: + status_callback(*status_callback_args, ":geppetto: I'm preparing the image, please be patient " + ":lower_left_paintbrush: ...") response = function(**function_args) return response else: diff --git a/geppetto/slack_handler.py b/geppetto/slack_handler.py index ab04839..8fb4eab 100644 --- a/geppetto/slack_handler.py +++ b/geppetto/slack_handler.py @@ -2,14 +2,18 @@ import os from slack_bolt import App import certifi -from .openai_handler import OpenAIHandler +import re +from geppetto.utils import is_image_data, lower_string_list # Set SSL certificate for secure requests os.environ["SSL_CERT_FILE"] = certifi.where() +# UI roles +USER = "slack_user" +ASSISTANT = "geppetto" + class SlackHandler: - thread_messages = {} def __init__( self, @@ -17,16 +21,15 @@ def __init__( bot_default_responses, SLACK_BOT_TOKEN, SIGNING_SECRET, - OPENAI_API_KEY, - DALLE_MODEL, - CHATGPT_MODEL, + llm_controller ): - self.openai = OpenAIHandler( - OPENAI_API_KEY, DALLE_MODEL, CHATGPT_MODEL, bot_default_responses - ) + self.name = 'Geppetto Slack handler' + self.llm_ctrl = llm_controller + self.llm = llm_controller.handlers self.app = 
+        thread_history = self.thread_messages.get(thread_id, {"llm": "", "msgs": []})
+        selected_llm = self.select_llm_from_msg(msg, thread_history["llm"])
+        if thread_history["llm"] == "":
+            thread_history["llm"] = selected_llm
+        current_usr_msg = {"role": USER, "content": msg}
+        if thread_history["llm"] == selected_llm:
+            thread_history["msgs"].append(current_usr_msg)
+        else:
+            thread_history["llm"] = selected_llm
+            thread_history["msgs"] = [thread_history["msgs"][0], current_usr_msg]
+
+        response = self.send_message(
+            channel_id,
+            thread_id,
+            ":geppetto: :thought_balloon: ..."
         )
 
         if response["ok"]:
@@ -60,46 +72,39 @@
         else:
             logging.error("Failed to post the message.")
 
-        response_from_chatgpt = self.openai.send_message(
-            thread_history, self.send_preparing_image_message, channel_id, thread_id
+        prompt = self.llm[selected_llm].get_prompt_from_thread(thread_history["msgs"], ASSISTANT, USER)
+        response_from_llm_api = self.llm[selected_llm].llm_generate_content(
+            prompt,
+            self.send_message,
+            channel_id,
+            thread_id,
         )
 
-        if isinstance(response_from_chatgpt, str):
-            thread_history.append(
-                {"role": "assistant", "content": response_from_chatgpt}
-            )
+        if isinstance(response_from_llm_api, str):
+            thread_history["msgs"].append({"role": ASSISTANT, "content": response_from_llm_api})
+            self.thread_messages[thread_id] = thread_history
 
         try:
-            if isinstance(response_from_chatgpt, bytes):
+            if is_image_data(response_from_llm_api):
                 self.app.client.files_upload_v2(
                     channel=channel_id,
                     thread_ts=thread_id,
-                    username="Dall-E",
-                    content=response_from_chatgpt,
+                    content=response_from_llm_api,
                     title="Image",
-                )
+                )  # TODO: images from other apis might not use bytes as datatype
             else:
-                logging.info("response from chatgpt: %s" % response_from_chatgpt)
+                logging.info(
+                    "response from %s: %s" % (self.name, response_from_llm_api)
+                )
                 self.app.client.chat_update(
                     channel=channel_id,
-                    text=response_from_chatgpt,
+                    text=response_from_llm_api,
                     thread_ts=thread_id,
                     ts=timestamp,
                 )
         except Exception as e:
             logging.error("Error posting message: %s", e)
 
-    def send_preparing_image_message(self, channel_id, thread_id):
-        dalle_message = self.bot_default_responses["dalle"]["preparing_image"]
-        logging.info("Sending dalle default message: %s" % dalle_message)
-
-        self.app.client.chat_postMessage(
-            channel=channel_id,
-            username="Dall-E",
-            text=dalle_message,
-            thread_ts=thread_id,
-        )
-
     def handle_event(self, body):
         event = body["event"]
         msg = event["text"]
@@ -117,12 +122,34 @@
             permission_denied_message = self.bot_default_responses["user"][
                 "permission_denied"
             ]
-            logging.info(
-                "Sending permission denied default message: %s"
-                % permission_denied_message
-            )
-            self.app.client.chat_postMessage(
-                channel=channel_id,
-                text=permission_denied_message,
-                thread_ts=thread_id,
-            )
+            self.send_message(channel_id,
+                              thread_id,
+                              permission_denied_message,
+                              "permission_denied")
+
+    def send_message(self, channel_id, thread_id, message, tag="general"):
+        logging.info(
+            "Sending %s message: %s"
+            % (tag, message)
+        )
+        return self.app.client.chat_postMessage(
+            channel=channel_id,
+            text=message,
+            thread_ts=thread_id,
+            mrkdwn=True
+        )
+
+    def select_llm_from_msg(self, message, last_llm=''):
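+        # Extract "llm_<name>" tags (e.g. "summarize this llm_gemini") and
+        # match them case-insensitively against the registered LLM names.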
+        mentions = re.findall(r'(?<=\bllm_)\w+', message)
+        clean_mentions = [re.sub(r'[\#\!\?\,\;\.]', "", mention) for mention in mentions]
+        hashtags = lower_string_list(clean_mentions)
+        controlled_llms = self.llm_ctrl.list_llms()
+        controlled_llms_l = lower_string_list(controlled_llms)
+        check_list = list(set(controlled_llms_l) & set(hashtags))
+        if len(check_list) == 1:
+            return controlled_llms[controlled_llms_l.index(check_list[0])]
+        elif len(check_list) == 0 and last_llm != '':
+            return last_llm
+        else:
+            # default first LLM
+            return controlled_llms[0]
diff --git a/geppetto/utils.py b/geppetto/utils.py
index 6c918bb..954202d 100644
--- a/geppetto/utils.py
+++ b/geppetto/utils.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+from typing import List
 
 
 def load_json(file_name):
@@ -15,3 +16,11 @@
     except json.JSONDecodeError:
         logging.error("Error decoding %s file." % file_name)
         return {}
+
+
+def is_image_data(data):
+    return isinstance(data, bytes)
+
+
+def lower_string_list(list_to_process: List[str]):
+    return [element.lower() for element in list_to_process]
diff --git a/pyproject.toml b/pyproject.toml
index f6f3550..c3d4e69 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,13 +26,15 @@
 ]
 
 [tool.poetry.dependencies]
-python = "^3.8"
+python = "^3.9"
 certifi = "^2023.11.17"
 openai = "^1.4.0"
 python-dotenv = "^1.0.0"
 slack-bolt = "^1.18.1"
 slack-sdk = "^3.26.1"
 Pillow = "^10.1.0"
+google-generativeai = "^0.5.0"
+IPython = "^8.0.0"
 
 [tool.poetry.scripts]
 geppetto = "geppetto.main:main"
diff --git a/requirements.txt b/requirements.txt
index 112fb57..18aa8a7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,4 +3,6 @@
 openai>=1.4.0
 python-dotenv==1.0.0
 slack-bolt>=1.18.1
 slack-sdk>=3.26.1
-pillow>=10.1.0
\ No newline at end of file
+pillow>=10.1.0
+google-generativeai>=0.5.0
+IPython>=8.0.0
\ No newline at end of file
diff --git a/tests/test_controller.py b/tests/test_controller.py
new file mode 100644
index 0000000..a981044
--- /dev/null
+++ b/tests/test_controller.py
@@ -0,0 +1,119 @@
+import unittest
+from geppetto.llm_api_handler import LLMHandler
+from geppetto.llm_controller import LLMController
+
+ClientMock = {}
+
+
+class HandlerMockA(LLMHandler):
+
+    def __init__(self):
+        super().__init__(
+            "First LLM",
+            "LLM1",
+            ClientMock
+        )
+
+    def llm_generate_content(self, **args):
+        pass
+
+    def get_prompt_from_thread(self, **args):
+        pass
+
+
+class HandlerMockB(LLMHandler):
+
+    def __init__(self, some_arg):
+        self.some_arg = some_arg
+        super().__init__(
+            "Second LLM",
+            "LLM2",
+            ClientMock
+        )
+
+    def llm_generate_content(self, **args):
+        pass
+
+    def get_prompt_from_thread(self, **args):
+        pass
+
+
+class HandlerMockC(LLMHandler):
+
+    def __init__(self, some_arg):
+        self.some_arg = some_arg
+        super().__init__(
+            "Third LLM",
+            "LLM3",
+            ClientMock
+        )
+
+    def llm_generate_content(self, **args):
+        pass
+
+    def get_prompt_from_thread(self, **args):
+        pass
+
+
+sample_llms_cfg = [
+    {
+        "name": "First LLM",
+        "handler": HandlerMockA,
+        "handler_args": {}
+    },
+    {
+        "name": "Second LLM",
+        "handler": HandlerMockB,
+        "handler_args": {"some_arg": "SecondGPT"}
+    }
+]
+
+
+class TestController(unittest.TestCase):
+
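+    # setUp builds a bare controller only: handlers are not instantiated
+    # until init_controller() is called.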
+    @classmethod
+    def setUp(cls):
+        cls.llm_controller = LLMController(
+            sample_llms_cfg
+        )
+
+    @classmethod
+    def tearDown(cls):
+        cls.llm_controller = None
+
+    def test_controller_set_up(self):
+        self.assertEqual(len(self.llm_controller.llm_cfgs), 2)
+        self.assertEqual(len(self.llm_controller.handlers), 0)
+
+    def test_initialize_controller(self):
+        self.llm_controller.init_controller()
+        self.assertEqual(len(self.llm_controller.llm_cfgs), 2)
+        self.assertEqual(len(self.llm_controller.handlers), 2)
+
+    def test_get_llm_cfg(self):
+        cfg = self.llm_controller.get_llm_cfg("Second LLM")
+        self.assertEqual(cfg["handler_args"]["some_arg"], "SecondGPT")
+
+    def test_attempt_get_nonexistent_llm_cfg(self):
+        self.assertRaises(
+            ValueError,
+            self.llm_controller.get_llm_cfg,
+            "Wrong LLM"
+        )
+
+    def test_list_llms(self):
+        llm_list = self.llm_controller.list_llms()
+        self.assertEqual(llm_list, ["First LLM", "Second LLM"])
+
+    def test_get_handler(self):
+        handler = self.llm_controller.get_handler("Second LLM")
+        self.assertIsInstance(handler, HandlerMockB)
+
+    def test_controller_handler_usage(self):
+        self.llm_controller.init_controller()
+        info1 = self.llm_controller.handlers["First LLM"].get_info()
+        self.assertEqual(info1, "Name: First LLM - Model: LLM1")
+        self.assertEqual(self.llm_controller.handlers["Second LLM"].some_arg, "SecondGPT")
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_gemini.py b/tests/test_gemini.py
new file mode 100644
index 0000000..4531057
--- /dev/null
+++ b/tests/test_gemini.py
@@ -0,0 +1,98 @@
+import os
+import sys
+import unittest
+from unittest.mock import Mock, patch
+
+script_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(script_dir)
+sys.path.append(parent_dir)
+
+from geppetto.exceptions import InvalidThreadFormatError
+from geppetto.gemini_handler import GeminiHandler
+
+
+def OF(**kw):
+    class OF:
+        pass
+    instance = OF()
+    for k, v in kw.items():
+        setattr(instance, k, v)
+    return instance
+
+
+class TestGemini(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = patch("geppetto.gemini_handler.genai")
+        cls.mock_genai = cls.patcher.start()
+        cls.gemini_handler = GeminiHandler(personality="Your AI personality")
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.patcher.stop()
+
+    def test_personality(self):
+        self.assertEqual(self.gemini_handler.personality, "Your AI personality")
+
+    @patch("geppetto.gemini_handler.to_markdown")
+    def test_llm_generate_content(self, mock_to_markdown):
+        user_prompt = [
+            {"role": "user", "parts": ["Hello"]},
+            {"role": "user", "parts": ["How are you?"]}
+        ]
+        mock_response = Mock()
+        mock_response.text = "Mocked Gemini response"
+        self.gemini_handler.client.generate_content.return_value = mock_response
+        mock_to_markdown.return_value.data = "Mocked Markdown data"
+
+        response = self.gemini_handler.llm_generate_content(user_prompt)
+
+        self.assertEqual(response, "Mocked Markdown data")
+        mock_to_markdown.assert_called_once_with("Mocked Gemini response")
+
+    def test_get_prompt_from_thread(self):
+        thread = [
+            {"role": "slack_user", "content": "Message 1"},
+            {"role": "geppetto", "content": "Message 2"}
+        ]
+
+        ROLE_FIELD = "role"
+        MSG_FIELD = "parts"
+
+        prompt = self.gemini_handler.get_prompt_from_thread(
+            thread, assistant_tag="geppetto", user_tag="slack_user"
+        )
+
+        self.assertIsInstance(prompt, list)
+
+        for msg in prompt:
+            self.assertIsInstance(msg, dict)
+            self.assertIn(ROLE_FIELD, msg)
+            self.assertIn(MSG_FIELD, msg)
+            self.assertIsInstance(msg[MSG_FIELD], list)
+            self.assertTrue(msg[MSG_FIELD])
+
+        with self.assertRaises(InvalidThreadFormatError):
+            incomplete_thread = [{"role": "geppetto"}]
+            self.gemini_handler.get_prompt_from_thread(
+                incomplete_thread, assistant_tag="geppetto", user_tag="slack_user"
+            )
+
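+    # The two leading user messages must be collapsed into a single turn
+    # before generate_content is called, since Gemini expects alternating
+    # user/model roles.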
+    def test_llm_generate_content_user_repetition(self):
+        user_prompt = [
+            {"role": "user", "parts": ["Hello"]},
+            {"role": "user", "parts": ["How are you?"]},
+            {"role": "geppetto", "parts": ["I'm fine."]}
+        ]
+
+        with patch.object(self.gemini_handler.client, "generate_content") as mock_generate_content:
+            mock_response = Mock()
+            mock_response.text = "Mocked Gemini response"
+            mock_generate_content.return_value = mock_response
+
+            self.gemini_handler.llm_generate_content(user_prompt)
+
+            mock_generate_content.assert_called_once_with(
+                [{"role": "user", "parts": ["Hello", "How are you?"]}, {"role": "geppetto", "parts": ["I'm fine."]}]
+            )
+
+
+if __name__ == "__main__":
+    unittest.main()
\ No newline at end of file
diff --git a/tests/test_open_ai.py b/tests/test_open_ai.py
index 2e4be2f..6b858e4 100644
--- a/tests/test_open_ai.py
+++ b/tests/test_open_ai.py
@@ -11,6 +11,7 @@
 
 from geppetto.openai_handler import OpenAIHandler
 
+TEST_PERSONALITY = "Your AI assistant"
 
 def OF(**kw):
     class OF:
@@ -27,17 +28,15 @@
     def setUpClass(cls):
         cls.patcher = patch("geppetto.openai_handler.OpenAI")
         cls.mock_openai = cls.patcher.start()
-        cls.openai_handler = OpenAIHandler(
-            "openai_api_key",
-            "dall-e-3",
-            "gpt-4",
-            {"features": {"personality": "_"}},
-        )
+        cls.openai_handler = OpenAIHandler(personality=TEST_PERSONALITY)
 
     @classmethod
     def tearDownClass(cls):
         cls.patcher.stop()
 
+    def test_personality(self):
+        self.assertEqual(self.openai_handler.personality, TEST_PERSONALITY)
+
     def test_send_text_message(self):
         user_prompt = [{"role": "user", "content": "Hello"}]
 
@@ -48,10 +47,10 @@
         self.mock_openai().chat.completions.create.return_value = (
             mock_chat_completion_response
         )
-        response = self.openai_handler.send_message(user_prompt, None, None)
+        response = self.openai_handler.llm_generate_content(user_prompt, self.my_callback, None)
         self.assertEqual(response, "Mocked ChatGPT Response")
 
-    def my_callback(self, result):
+    def my_callback(self, *args):
         logging.info("Image sent successfully")
 
     @patch("geppetto.openai_handler.OpenAIHandler.download_image")
@@ -78,9 +77,7 @@
 
         user_prompt = [{"role": "user", "content": "Generate an image of a mountain"}]
 
-        callback = self.my_callback
-
-        response = self.openai_handler.send_message(user_prompt, callback, None)
+        response = self.openai_handler.llm_generate_content(user_prompt, self.my_callback, None)
 
         # Assuming download_image returns bytes
         self.assertIsInstance(response, bytes)
diff --git a/tests/test_slack.py b/tests/test_slack.py
index 6fe4841..4df5de6 100644
--- a/tests/test_slack.py
+++ b/tests/test_slack.py
@@ -3,6 +3,9 @@
 import unittest
 from unittest.mock import patch, ANY
 
+from geppetto.llm_controller import LLMController
+from tests.test_open_ai import TEST_PERSONALITY
+
 script_dir = os.path.dirname(os.path.abspath(__file__))
 parent_dir = os.path.dirname(script_dir)
 sys.path.append(parent_dir)
@@ -10,20 +13,24 @@
 from geppetto.utils import load_json
 from geppetto.slack_handler import SlackHandler
 
+MOCK_GENERIC_LLM_RESPONSE = "Mock text response"
+MOCK_GENERIC_LLM_RESPONSE_B = MOCK_GENERIC_LLM_RESPONSE + " B"
+MOCK_GENERIC_LLM_RESPONSE_C = MOCK_GENERIC_LLM_RESPONSE + " C"
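+
+# Note: setUp/tearDown below run once per test (replacing the previous
+# setUpClass/tearDownClass), so every case gets fresh mock handlers and a
+# fresh controller.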
MOCK_GENERIC_LLM_RESPONSE + " C" class TestSlack(unittest.TestCase): @classmethod - def setUpClass(cls): - cls.patcher1 = patch("geppetto.slack_handler.OpenAIHandler") - cls.patcher2 = patch("geppetto.slack_handler.App") - cls.MockOpenAIHandler = cls.patcher1.start() - cls.MockApp = cls.patcher2.start() + def setUp(cls): + cls.patcherA = patch("tests.test_controller.HandlerMockA") + cls.patcherB = patch("tests.test_controller.HandlerMockB") + cls.patcherC = patch("tests.test_controller.HandlerMockC") + cls.MockLLMHandlerA = cls.patcherA.start() + cls.MockLLMHandlerB = cls.patcherB.start() + cls.MockLLMHandlerC = cls.patcherC.start() + cls.patcher1 = patch("geppetto.slack_handler.App") + cls.MockApp = cls.patcher1.start() SLACK_BOT_TOKEN = "slack_bot_token" SIGNING_SECRET = "signing_secret" - OPENAI_API_KEY = "openai_key" - DALLE_MODEL = "dall-e-3" - CHATGPT_MODEL = "gpt-4" BOT_DEFAULT_RESPONSES = load_json("default_responses.json") cls.slack_handler = SlackHandler( @@ -31,15 +38,17 @@ def setUpClass(cls): BOT_DEFAULT_RESPONSES, SLACK_BOT_TOKEN, SIGNING_SECRET, - OPENAI_API_KEY, - DALLE_MODEL, - CHATGPT_MODEL, + initialized_test_llm_controller(cls.MockLLMHandlerA, + cls.MockLLMHandlerB, + cls.MockLLMHandlerC) ) @classmethod - def tearDownClass(cls): + def tearDown(cls): cls.patcher1.stop() - cls.patcher2.stop() + cls.patcherA.stop() + cls.patcherB.stop() + cls.patcherC.stop() def test_permission_check(self): body = { @@ -57,6 +66,7 @@ def test_permission_check(self): channel="test_channel", text=self.slack_handler.bot_default_responses["user"]["permission_denied"], thread_ts="1", + mrkdwn=True ) def test_random_user_allowed_with_wildcard_permission(self): @@ -74,13 +84,13 @@ def test_random_user_allowed_with_wildcard_permission(self): self.MockApp().client.chat_postMessage.assert_called_with( channel="test_channel", - text=":geppetto: ... :thought_balloon: ...", + text=":geppetto: :thought_balloon: ...", thread_ts="1", + mrkdwn=True ) def test_handle_message(self): - mock_open_ai_response = "Mock text response" - self.MockOpenAIHandler().send_message.return_value = mock_open_ai_response + self.MockLLMHandlerA().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE channel_id = "test_channel" thread_id = "test_thread_id" @@ -90,52 +100,305 @@ def test_handle_message(self): self.assertIn(thread_id, self.slack_handler.thread_messages) self.assertIn( - {"role": "user", "content": message}, - self.slack_handler.thread_messages[thread_id], + {"role": "slack_user", "content": message}, + self.slack_handler.thread_messages[thread_id]["msgs"], ) self.assertIn( - {"role": "assistant", "content": mock_open_ai_response}, - self.slack_handler.thread_messages[thread_id], + {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE}, + self.slack_handler.thread_messages[thread_id]["msgs"], ) self.MockApp().client.chat_postMessage.assert_called_with( channel=channel_id, - text=":geppetto: ... 
:thought_balloon: ...", + text=":geppetto: :thought_balloon: ...", thread_ts=thread_id, + mrkdwn=True ) self.MockApp().client.chat_update.assert_called_with( channel=channel_id, - text=mock_open_ai_response, + text=MOCK_GENERIC_LLM_RESPONSE, thread_ts=thread_id, ts=ANY, ) + def test_handle_message_switch_simple(self): + channel_id = "test_channel" + thread_id = "test_thread_id" + + # Case A: DEFAULT LLM A + self.MockLLMHandlerA().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE + message_a = "Test message" + self.slack_handler.handle_message(message_a, channel_id, thread_id) + self.assertIn( + {"role": "slack_user", "content": message_a}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + self.assertIn( + {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + + # Case B: LLM B + self.MockLLMHandlerB().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE_B + message_b = "Test message llm_llmb" + self.slack_handler.handle_message(message_b, channel_id, thread_id) + self.assertIn( + {"role": "slack_user", "content": message_b}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + self.assertIn( + {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE_B}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + + def test_handle_message_switch_same_thread_continue_non_default(self): + channel_id = "test_channel" + thread_id = "test_thread_id" + + # Case C: LLM C + self.MockLLMHandlerC().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE_C + message_c = "Test message llm_llmc" + self.slack_handler.handle_message(message_c, channel_id, thread_id) + self.assertIn( + {"role": "slack_user", "content": message_c}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + self.assertIn( + {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE_C}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + + # DON'T switch to DEFAULT in started NON DEFAULT conversations + self.MockLLMHandlerA().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE + non_labeled_msg = "Second message" + self.slack_handler.handle_message(non_labeled_msg, channel_id, thread_id) + self.assertIn( + {"role": "slack_user", "content": non_labeled_msg}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + self.assertIn( + {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE_C}, + self.slack_handler.thread_messages[thread_id]["msgs"], + ) + + def test_handle_message_switch_same_thread_reset_on_switch(self): + channel_id = "test_channel" + thread_id = "test_thread_id" + + # Case A: LLM A + self.MockLLMHandlerA().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE + message_a = "Test message llm_llma" + self.slack_handler.handle_message(message_a, channel_id, thread_id) + user_msg_a = {"role": "slack_user", "content": message_a} + geppetto_msg_a = {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE} + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(user_msg_a), + 1 + ) + + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(geppetto_msg_a), + 1 + ) + + # Continue conversation with LLM A + for _ in range(3): + self.slack_handler.handle_message(message_a, channel_id, thread_id) + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(user_msg_a), + 4 + ) + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(geppetto_msg_a), + 4 + ) + + # 
SWITCH TO LLM C in an ongoing conversation + self.MockLLMHandlerC().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE_C + message_c = "Test message llm_llmc" + user_msg_c = {"role": "slack_user", "content": message_c} + geppetto_msg_c = {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE_C} + self.slack_handler.handle_message(message_c, channel_id, thread_id) + # the first user message is kept but the rest is dumped + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(user_msg_a), + 1 + ) + # the previous llm responses are dumped + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(geppetto_msg_a), + 0 + ) + # the message that triggered the switch is kept to give context + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(user_msg_c), + 1 + ) + # the answer of the new selected llm + self.assertEqual( + self.slack_handler.thread_messages[thread_id]["msgs"].count(geppetto_msg_c), + 1 + ) + + def test_handle_message_switch_different_thread(self): + channel_id = "test_channel" + + thread_id_i = "test_thread_id_i" + thread_id_ii = "test_thread_id_ii" + non_labeled_msg = "Second message" + user_msg_generic = {"role": "slack_user", "content": non_labeled_msg} + + # --- LLM B on thread I --- + self.MockLLMHandlerB().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE + message_b = "Test message llm_llmb" + self.slack_handler.handle_message(message_b, channel_id, thread_id_i) + user_msg_b = {"role": "slack_user", "content": message_b} + geppetto_msg_b = {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE} + + # --- LLM C on thread II --- + self.MockLLMHandlerC().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE + message_c = "Test message llm_llmc" + self.slack_handler.handle_message(message_c, channel_id, thread_id_ii) + user_msg_c = {"role": "slack_user", "content": message_c} + geppetto_msg_c = {"role": "geppetto", "content": MOCK_GENERIC_LLM_RESPONSE} + + # --- Return to LLM B on thread I --- + # check + self.assertEqual( + self.slack_handler.thread_messages[thread_id_i]["msgs"].count(user_msg_b), + 1 + ) + self.assertEqual( + self.slack_handler.thread_messages[thread_id_i]["msgs"].count(geppetto_msg_b), + 1 + ) + # Continue conversation with LLM B without label + for _ in range(3): + self.slack_handler.handle_message(non_labeled_msg, channel_id, thread_id_i) + + # --- Return to LLM C on thread II --- + # check + self.assertEqual( + self.slack_handler.thread_messages[thread_id_ii]["msgs"].count(user_msg_c), + 1 + ) + self.assertEqual( + self.slack_handler.thread_messages[thread_id_ii]["msgs"].count(geppetto_msg_c), + 1 + ) + # Continue conversation with LLM C without label + for _ in range(9): + self.slack_handler.handle_message(non_labeled_msg, channel_id, thread_id_ii) + + # --- Return to LLM B on thread I --- + # check + self.assertEqual( + self.slack_handler.thread_messages[thread_id_i]["msgs"].count(user_msg_b), + 1 + ) + self.assertEqual( + self.slack_handler.thread_messages[thread_id_i]["msgs"].count(user_msg_generic), + 3 + ) + self.assertEqual( + self.slack_handler.thread_messages[thread_id_i]["msgs"].count(geppetto_msg_b), + 4 + ) + + # --- Return to LLM C on thread II --- + # check + self.assertEqual( + self.slack_handler.thread_messages[thread_id_ii]["msgs"].count(user_msg_c), + 1 + ) + self.assertEqual( + self.slack_handler.thread_messages[thread_id_ii]["msgs"].count(user_msg_generic), + 9 + ) + self.assertEqual( + 
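+
+    # Byte responses are treated as image data and uploaded with
+    # files_upload_v2; plain strings are posted via chat_update instead.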
 
     def test_handle_image(self):
         channel_id = "test_channel"
         thread_id = "test_thread_id"
         message = "Test message"
 
-        mock_open_ai_text_response = "Mock text response"
-        self.MockOpenAIHandler().send_message.return_value = mock_open_ai_text_response
+        self.MockLLMHandlerA().llm_generate_content.return_value = MOCK_GENERIC_LLM_RESPONSE
 
         self.slack_handler.handle_message(message, channel_id, thread_id)
 
         self.MockApp().client.chat_update.assert_called_with(
             channel=channel_id,
-            text=mock_open_ai_text_response,
+            text=MOCK_GENERIC_LLM_RESPONSE,
             thread_ts=thread_id,
             ts=ANY,
         )
 
         mock_open_ai_byte_response = b"Mock byte response"
-        self.MockOpenAIHandler().send_message.return_value = mock_open_ai_byte_response
+        self.MockLLMHandlerA().llm_generate_content.return_value = mock_open_ai_byte_response
 
         self.slack_handler.handle_message(message, channel_id, thread_id)
 
         self.MockApp().client.files_upload_v2.assert_called_with(
             channel=channel_id,
             thread_ts=thread_id,
-            username="Dall-E",
             content=b"Mock byte response",
             title="Image",
         )
 
+    def test_select_llm_from_msg(self):
+        message_a = "llm_llma Test message"
+        message_b = "Test llm_llmb message"
+        message_c = "Test message llm_llmc?"
+        message_default_empty = "Test message"
+        message_default_many = "llm_llmc Test llm_llmb message llm_llma"
+        message_default_wrong = "Test message #zeta"
+
+        self.assertEqual(self.slack_handler.select_llm_from_msg(
+            message_a), "LlmA")
+
+        self.assertEqual(self.slack_handler.select_llm_from_msg(
+            message_b), "LLMb")
+
+        self.assertEqual(self.slack_handler.select_llm_from_msg(
+            message_c), "LLMC")
+
+        self.assertEqual(self.slack_handler.select_llm_from_msg(
+            message_default_empty), "LlmA")
+
+        self.assertEqual(self.slack_handler.select_llm_from_msg(
+            message_default_many), "LlmA")
+
+        self.assertEqual(self.slack_handler.select_llm_from_msg(
+            message_default_wrong), "LlmA")
+
+
+def initialized_test_llm_controller(mocked_handler_a, mocked_handler_b, mocked_handler_c):
+    controller = LLMController(
+        [
+            {
+                "name": "LlmA",
+                "handler": mocked_handler_a,
+                "handler_args": {
+                    "personality": TEST_PERSONALITY
+                }
+            },
+            {
+                "name": "LLMb",
+                "handler": mocked_handler_b,
+                "handler_args": {
+                    "personality": TEST_PERSONALITY
+                }
+            },
+            {
+                "name": "LLMC",
+                "handler": mocked_handler_c,
+                "handler_args": {
+                    "personality": TEST_PERSONALITY
+                }
+            }
+        ]
+    )
+    controller.init_controller()
+    return controller
 
 if __name__ == "__main__":
     unittest.main()