diff --git a/haystack/components/evaluators/llm_evaluator.py b/haystack/components/evaluators/llm_evaluator.py
index 1b6234031e..39f8653b98 100644
--- a/haystack/components/evaluators/llm_evaluator.py
+++ b/haystack/components/evaluators/llm_evaluator.py
@@ -87,7 +87,9 @@ def __init__(
         self.api_key = api_key
 
         if api == "openai":
-            self.generator = OpenAIGenerator(api_key=api_key)
+            self.generator = OpenAIGenerator(
+                api_key=api_key, generation_kwargs={"response_format": {"type": "json_object"}}
+            )
         else:
             raise ValueError(f"Unsupported API: {api}")
 
diff --git a/releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml b/releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml
new file mode 100644
index 0000000000..19e1ddb15c
--- /dev/null
+++ b/releasenotes/notes/force-valid-JSON-OpeanAI-LLM-based-evaluators-64816e68f137739b.yaml
@@ -0,0 +1,6 @@
+---
+
+enhancements:
+  - |
+    Enforce JSON mode on OpenAI LLM-based evaluators so that they always return valid JSON output.
+    This ensures the output format is consistent regardless of the input.
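
For context, a minimal sketch of what this change enables, assuming the Haystack 2.x `OpenAIGenerator` API (constructor accepting `generation_kwargs`, `Secret.from_env_var` for the key, and a `run()` result with a `replies` list); the prompt and the expected `{"score": ...}` shape are hypothetical and only illustrate that replies can now be parsed as JSON without a fallback path:

```python
import json

from haystack.components.generators import OpenAIGenerator
from haystack.utils import Secret

# With OpenAI's JSON mode enabled via generation_kwargs, every reply should be
# parseable JSON, which is what the LLM-based evaluators rely on downstream.
generator = OpenAIGenerator(
    api_key=Secret.from_env_var("OPENAI_API_KEY"),
    generation_kwargs={"response_format": {"type": "json_object"}},
)

# Hypothetical evaluator-style prompt; JSON mode requires the prompt to mention JSON.
result = generator.run(
    prompt='Respond with a JSON object of the form {"score": 0 or 1} for the statement: "Paris is in France."'
)

parsed = json.loads(result["replies"][0])  # should not raise, since JSON mode forces valid JSON
print(parsed)
```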