Skip to content

Commit

Permalink
Remove caching of LLM instances
Browse files Browse the repository at this point in the history
  • Loading branch information
Hialus committed Jan 8, 2024
1 parent 74de5c7 commit 7f74dba
Showing 1 changed file with 13 additions and 9 deletions.
22 changes: 13 additions & 9 deletions app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,11 @@ def check_type(cls, v):
return v

def get_instance(cls):
    """Return a new OpenAI client built from the configured credentials.

    Caching was deliberately removed: a fresh instance is created on
    every call so credential changes take effect immediately and no
    stale client is shared across requests.
    """
    return OpenAI(**cls.llm_credentials)


class StrategyLLMConfig(LLMModelConfig):
Expand All @@ -47,13 +48,16 @@ def check_type(cls, v):
return v

def get_instance(cls):
    """Return a new StrategyLLM built from the configured sub-LLMs.

    Caching was deliberately removed: a fresh instance is created on
    every call so configuration changes take effect immediately.
    """
    # Local import needed to avoid circular dependency
    from app.llms.strategy_llm import StrategyLLM

    return StrategyLLM(cls.llms)


class APIKeyConfig(BaseModel):
Expand Down

0 comments on commit 7f74dba

Please sign in to comment.