From 7f74dba5d63ba8e0e588cb33461494e19e7d7541 Mon Sep 17 00:00:00 2001
From: Timor Morrien
Date: Mon, 8 Jan 2024 10:25:46 +0100
Subject: [PATCH] Remove caching of LLM instances

---
 app/config.py | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/app/config.py b/app/config.py
index 63a2414a..f596d58d 100644
--- a/app/config.py
+++ b/app/config.py
@@ -30,10 +30,11 @@ def check_type(cls, v):
         return v
 
     def get_instance(cls):
-        if cls.instance is not None:
-            return cls.instance
-        cls.instance = OpenAI(**cls.llm_credentials)
-        return cls.instance
+        return OpenAI(**cls.llm_credentials)
+        # if cls.instance is not None:
+        #     return cls.instance
+        # cls.instance = OpenAI(**cls.llm_credentials)
+        # return cls.instance
 
 
 class StrategyLLMConfig(LLMModelConfig):
@@ -47,13 +48,16 @@ def check_type(cls, v):
         return v
 
     def get_instance(cls):
-        if cls.instance is not None:
-            return cls.instance
-        # Local import needed to avoid circular dependency
         from app.llms.strategy_llm import StrategyLLM
 
-        cls.instance = StrategyLLM(cls.llms)
-        return cls.instance
+        return StrategyLLM(cls.llms)
+        # if cls.instance is not None:
+        #     return cls.instance
+        # # Local import needed to avoid circular dependency
+        # from app.llms.strategy_llm import StrategyLLM
+        #
+        # cls.instance = StrategyLLM(cls.llms)
+        # return cls.instance
 
 
 class APIKeyConfig(BaseModel):
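
For context, the patch drops the memoised class-level singleton: the old get_instance returned a cached client, the new one constructs a fresh object on every call. The sketch below reproduces that difference with a stand-in FakeLLM class; it is an illustration only, and FakeLLM, CachedConfig, and UncachedConfig are hypothetical names, not the Pydantic models in app/config.py.

# Minimal sketch of the behavioural change this patch makes.
# FakeLLM, CachedConfig and UncachedConfig are hypothetical stand-ins,
# not the actual classes from app/config.py.

class FakeLLM:
    def __init__(self, **credentials):
        self.credentials = credentials


class CachedConfig:
    """Old behaviour: memoise the client in a class attribute."""
    instance = None
    llm_credentials = {"api_key": "dummy"}

    @classmethod
    def get_instance(cls):
        if cls.instance is not None:
            return cls.instance
        cls.instance = FakeLLM(**cls.llm_credentials)
        return cls.instance


class UncachedConfig:
    """New behaviour: build a fresh client on every call."""
    llm_credentials = {"api_key": "dummy"}

    @classmethod
    def get_instance(cls):
        return FakeLLM(**cls.llm_credentials)


# The cached variant hands back the same object; the uncached one does not.
assert CachedConfig.get_instance() is CachedConfig.get_instance()
assert UncachedConfig.get_instance() is not UncachedConfig.get_instance()

A likely motivation is avoiding one shared mutable client (and its credentials) across callers, though the commit message only states that caching is removed; the commented-out branches in the patch preserve the old singleton logic for reference.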