From 63aaf0332acb5a582d6c382e55141f1b4116a419 Mon Sep 17 00:00:00 2001
From: Michael Dyer
Date: Wed, 14 Feb 2024 18:25:29 +0100
Subject: [PATCH] Apply autoformatter

---
 app/llm/basic_request_handler.py              |  4 +++-
 .../iris_langchain_completion_model.py        |  4 +---
 app/llm/llm_manager.py                        |  4 +---
 app/llm/request_handler_interface.py          |  9 ++++---
 app/llm/wrapper/__init__.py                   | 24 ++++++++++++-------
 app/llm/wrapper/model.py                      | 16 +++++++++----
 app/llm/wrapper/ollama.py                     |  8 +++++--
 app/llm/wrapper/openai_chat.py                | 12 +++++++---
 8 files changed, 53 insertions(+), 28 deletions(-)

diff --git a/app/llm/basic_request_handler.py b/app/llm/basic_request_handler.py
index 84af8883..a5d2ca15 100644
--- a/app/llm/basic_request_handler.py
+++ b/app/llm/basic_request_handler.py
@@ -15,7 +15,9 @@ def complete(self, prompt: str, arguments: CompletionArguments) -> str:
         llm = self.llm_manager.get_by_id(self.model_id)
         return llm.complete(prompt, arguments)
 
-    def chat(self, messages: list[IrisMessage], arguments: CompletionArguments) -> IrisMessage:
+    def chat(
+        self, messages: list[IrisMessage], arguments: CompletionArguments
+    ) -> IrisMessage:
         llm = self.llm_manager.get_by_id(self.model_id)
         return llm.chat(messages, arguments)
 
diff --git a/app/llm/langchain/iris_langchain_completion_model.py b/app/llm/langchain/iris_langchain_completion_model.py
index 1dc54c6b..b0d056e2 100644
--- a/app/llm/langchain/iris_langchain_completion_model.py
+++ b/app/llm/langchain/iris_langchain_completion_model.py
@@ -26,9 +26,7 @@ def _generate(
         generations = []
         args = CompletionArguments(stop=stop)
         for prompt in prompts:
-            completion = self.request_handler.complete(
-                prompt=prompt, arguments=args
-            )
+            completion = self.request_handler.complete(prompt=prompt, arguments=args)
             generations.append([Generation(text=completion)])
         return LLMResult(generations=generations)
 
diff --git a/app/llm/llm_manager.py b/app/llm/llm_manager.py
index f08d3c73..fc8862a4 100644
--- a/app/llm/llm_manager.py
+++ b/app/llm/llm_manager.py
@@ -21,9 +21,7 @@ def load_llms() -> dict[str, LanguageModel]:
         yaml_dict = yaml.safe_load(file)
     llms = LLMList.model_validate({"llms": yaml_dict}).llms
 
-    return {
-        llm.id: llm for llm in llms
-    }
+    return {llm.id: llm for llm in llms}
 
 
 class LlmManager(metaclass=Singleton):
diff --git a/app/llm/request_handler_interface.py b/app/llm/request_handler_interface.py
index 6f2ad666..16ac9646 100644
--- a/app/llm/request_handler_interface.py
+++ b/app/llm/request_handler_interface.py
@@ -10,9 +10,12 @@ class RequestHandler(metaclass=ABCMeta):
     @classmethod
     def __subclasshook__(cls, subclass) -> bool:
         return (
-            hasattr(subclass, "complete") and callable(subclass.complete)
-            and hasattr(subclass, "chat") and callable(subclass.chat)
-            and hasattr(subclass, "embed") and callable(subclass.embed)
+            hasattr(subclass, "complete")
+            and callable(subclass.complete)
+            and hasattr(subclass, "chat")
+            and callable(subclass.chat)
+            and hasattr(subclass, "embed")
+            and callable(subclass.embed)
         )
 
     @abstractmethod
diff --git a/app/llm/wrapper/__init__.py b/app/llm/wrapper/__init__.py
index 367257b7..63129fd1 100644
--- a/app/llm/wrapper/__init__.py
+++ b/app/llm/wrapper/__init__.py
@@ -1,15 +1,21 @@
 from llm.wrapper.model import LanguageModel
-from llm.wrapper.openai_completion import NativeOpenAICompletionModel, AzureOpenAICompletionModel
+from llm.wrapper.openai_completion import (
+    NativeOpenAICompletionModel,
+    AzureOpenAICompletionModel,
+)
 from llm.wrapper.openai_chat import NativeOpenAIChatModel, AzureOpenAIChatModel
-from llm.wrapper.openai_embeddings import NativeOpenAIEmbeddingModel, AzureOpenAIEmbeddingModel
+from llm.wrapper.openai_embeddings import (
+    NativeOpenAIEmbeddingModel,
+    AzureOpenAIEmbeddingModel,
+)
 from llm.wrapper.ollama import OllamaModel
 
 type AnyLLM = (
-        NativeOpenAICompletionModel
-        | AzureOpenAICompletionModel
-        | NativeOpenAIChatModel
-        | AzureOpenAIChatModel
-        | NativeOpenAIEmbeddingModel
-        | AzureOpenAIEmbeddingModel
-        | OllamaModel
+    NativeOpenAICompletionModel
+    | AzureOpenAICompletionModel
+    | NativeOpenAIChatModel
+    | AzureOpenAIChatModel
+    | NativeOpenAIEmbeddingModel
+    | AzureOpenAIEmbeddingModel
+    | OllamaModel
 )
diff --git a/app/llm/wrapper/model.py b/app/llm/wrapper/model.py
index 7af01e83..c831009f 100644
--- a/app/llm/wrapper/model.py
+++ b/app/llm/wrapper/model.py
@@ -23,7 +23,9 @@ def __subclasshook__(cls, subclass) -> bool:
     @abstractmethod
     def complete(self, prompt: str, arguments: CompletionArguments) -> str:
         """Create a completion from the prompt"""
-        raise NotImplementedError(f"The LLM {self.__str__()} does not support completion")
+        raise NotImplementedError(
+            f"The LLM {self.__str__()} does not support completion"
+        )
 
 
 class ChatModel(LanguageModel, metaclass=ABCMeta):
@@ -34,9 +36,13 @@ def __subclasshook__(cls, subclass) -> bool:
         return hasattr(subclass, "chat") and callable(subclass.chat)
 
     @abstractmethod
-    def chat(self, messages: list[IrisMessage], arguments: CompletionArguments) -> IrisMessage:
+    def chat(
+        self, messages: list[IrisMessage], arguments: CompletionArguments
+    ) -> IrisMessage:
         """Create a completion from the chat messages"""
-        raise NotImplementedError(f"The LLM {self.__str__()} does not support chat completion")
+        raise NotImplementedError(
+            f"The LLM {self.__str__()} does not support chat completion"
+        )
 
 
 class EmbeddingModel(LanguageModel, metaclass=ABCMeta):
@@ -49,4 +55,6 @@ def __subclasshook__(cls, subclass) -> bool:
     @abstractmethod
     def embed(self, text: str) -> list[float]:
         """Create an embedding from the text"""
-        raise NotImplementedError(f"The LLM {self.__str__()} does not support embeddings")
+        raise NotImplementedError(
+            f"The LLM {self.__str__()} does not support embeddings"
+        )
diff --git a/app/llm/wrapper/ollama.py b/app/llm/wrapper/ollama.py
index 6d9afab8..acd229b1 100644
--- a/app/llm/wrapper/ollama.py
+++ b/app/llm/wrapper/ollama.py
@@ -34,8 +34,12 @@ def complete(self, prompt: str, arguments: CompletionArguments) -> str:
         response = self._client.generate(model=self.model, prompt=prompt)
         return response["response"]
 
-    def chat(self, messages: list[IrisMessage], arguments: CompletionArguments) -> IrisMessage:
-        response = self._client.chat(model=self.model, messages=convert_to_ollama_messages(messages))
+    def chat(
+        self, messages: list[IrisMessage], arguments: CompletionArguments
+    ) -> IrisMessage:
+        response = self._client.chat(
+            model=self.model, messages=convert_to_ollama_messages(messages)
+        )
         return convert_to_iris_message(response["message"])
 
     def embed(self, text: str) -> list[float]:
diff --git a/app/llm/wrapper/openai_chat.py b/app/llm/wrapper/openai_chat.py
index 2d6f766c..87910f9a 100644
--- a/app/llm/wrapper/openai_chat.py
+++ b/app/llm/wrapper/openai_chat.py
@@ -9,8 +9,12 @@
 from llm.wrapper.model import ChatModel
 
 
-def convert_to_open_ai_messages(messages: list[IrisMessage]) -> list[ChatCompletionMessageParam]:
-    return [{"role": message.role.value, "content": message.text} for message in messages]
+def convert_to_open_ai_messages(
+    messages: list[IrisMessage],
+) -> list[ChatCompletionMessageParam]:
+    return [
+        {"role": message.role.value, "content": message.text} for message in messages
+    ]
 
 
 def convert_to_iris_message(message: ChatCompletionMessage) -> IrisMessage:
@@ -24,7 +28,9 @@ class OpenAIChatModel(ChatModel):
     api_key: str
     _client: OpenAI
 
-    def chat(self, messages: list[IrisMessage], arguments: CompletionArguments) -> IrisMessage:
+    def chat(
+        self, messages: list[IrisMessage], arguments: CompletionArguments
+    ) -> IrisMessage:
         response = self._client.chat.completions.create(
             model=self.model,
             messages=convert_to_open_ai_messages(messages),