From 75cf35c7432b55ab5131cd87839cd3611a1b0f22 Mon Sep 17 00:00:00 2001
From: "David S. Batista"
Date: Tue, 14 May 2024 12:22:38 +0200
Subject: [PATCH] fix: forcing response format to be JSON valid (#7692)

* forcing response format to be JSON valid

* adding release notes

* cleaning up

* Update haystack/components/evaluators/llm_evaluator.py

Co-authored-by: Madeesh Kannan

---------

Co-authored-by: Madeesh Kannan
---
 haystack/components/evaluators/llm_evaluator.py             | 4 +++-
 ...-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml | 6 ++++++
 2 files changed, 9 insertions(+), 1 deletion(-)
 create mode 100644 releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml

diff --git a/haystack/components/evaluators/llm_evaluator.py b/haystack/components/evaluators/llm_evaluator.py
index 1b6234031e..39f8653b98 100644
--- a/haystack/components/evaluators/llm_evaluator.py
+++ b/haystack/components/evaluators/llm_evaluator.py
@@ -87,7 +87,9 @@ def __init__(
         self.api_key = api_key
 
         if api == "openai":
-            self.generator = OpenAIGenerator(api_key=api_key)
+            self.generator = OpenAIGenerator(
+                api_key=api_key, generation_kwargs={"response_format": {"type": "json_object"}}
+            )
         else:
             raise ValueError(f"Unsupported API: {api}")
 
diff --git a/releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml b/releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml
new file mode 100644
index 0000000000..19e1ddb15c
--- /dev/null
+++ b/releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml
@@ -0,0 +1,6 @@
+---
+
+enhancements:
+  - |
+    Enforce JSON mode on OpenAI LLM-based evaluators so that they always return valid JSON output.
+    This ensures the output format is consistent, regardless of the input.
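
The sketch below (not part of the patch) illustrates what the change does in practice: the evaluator's internal OpenAIGenerator is now constructed with OpenAI's JSON mode enabled, so its replies can be parsed directly with json.loads. The prompt text and the environment-variable name are illustrative assumptions, not taken from the patch.

# Minimal sketch of the generator configuration introduced by this patch.
import json

from haystack.components.generators import OpenAIGenerator
from haystack.utils import Secret

# Equivalent to how LLMEvaluator now builds its generator internally.
generator = OpenAIGenerator(
    api_key=Secret.from_env_var("OPENAI_API_KEY"),  # illustrative; any Secret works
    generation_kwargs={"response_format": {"type": "json_object"}},
)

# OpenAI's JSON mode requires the word "JSON" to appear in the prompt.
result = generator.run(prompt='Return a JSON object with a single key "score" set to 0 or 1.')
reply = result["replies"][0]
print(json.loads(reply))  # parses without a retry loop, since the reply is valid JSON

Without the response_format setting, the model can wrap its answer in prose or Markdown fences, which makes the evaluator's JSON parsing fail intermittently; enforcing JSON mode removes that failure mode at the API level.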