From 90a39f7e7b5274d931d3620e3992e44dcc1e7e50 Mon Sep 17 00:00:00 2001
From: Vesna Tanko
Date: Wed, 20 Dec 2023 11:42:44 +0100
Subject: [PATCH] Chat GPT Constructor: Consider Model when caching

---
 orangecontrib/prototypes/widgets/owchatgptconstructor.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/orangecontrib/prototypes/widgets/owchatgptconstructor.py b/orangecontrib/prototypes/widgets/owchatgptconstructor.py
index 784cdc5f..b46f2348 100644
--- a/orangecontrib/prototypes/widgets/owchatgptconstructor.py
+++ b/orangecontrib/prototypes/widgets/owchatgptconstructor.py
@@ -48,15 +48,15 @@ def ask_gpt(self, state) -> List:
         if state.is_interruption_requested():
             raise Exception
 
-        args = (text.strip(),
+        args = (MODELS[self.model_index],
+                text.strip(),
                 self.prompt_start.strip(),
                 self.prompt_end.strip())
         if args in self.cache:
             answer = self.cache[args]
         else:
             try:
-                answer = run_gpt(self.access_key, MODELS[self.model_index],
-                                 *args)
+                answer = run_gpt(self.access_key, *args)
                 self.cache[args] = answer
             except Exception as ex:
                 answer = ex