From b7b92d595b5337d27c234ae23da48974d17260dc Mon Sep 17 00:00:00 2001
From: Bagatur
Date: Mon, 6 Nov 2023 14:04:13 -0800
Subject: [PATCH] include

---
 libs/langchain/langchain/chat_models/openai.py    | 14 ++++++++++++--
 .../integration_tests/chat_models/test_openai.py  |  2 ++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py
index 5fc9e6c41e00d..c177758cd4620 100644
--- a/libs/langchain/langchain/chat_models/openai.py
+++ b/libs/langchain/langchain/chat_models/openai.py
@@ -342,6 +342,7 @@ def _completion_with_retry(**kwargs: Any) -> Any:
     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
+        system_fingerprint = None
         for output in llm_outputs:
             if output is None:
                 # Happens in streaming
                 continue
@@ -352,7 +353,12 @@ def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
                     overall_token_usage[k] += v
                 else:
                     overall_token_usage[k] = v
-        return {"token_usage": overall_token_usage, "model_name": self.model_name}
+            if system_fingerprint is None:
+                system_fingerprint = output.get("system_fingerprint")
+        combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
+        if system_fingerprint:
+            combined["system_fingerprint"] = system_fingerprint
+        return combined
 
     def _stream(
         self,
@@ -430,7 +436,11 @@ def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
             )
             generations.append(gen)
         token_usage = response.get("usage", {})
-        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
+        llm_output = {
+            "token_usage": token_usage,
+            "model_name": self.model_name,
+            "system_fingerprint": response.get("system_fingerprint", ""),
+        }
         return ChatResult(generations=generations, llm_output=llm_output)
 
     async def _astream(
diff --git a/libs/langchain/tests/integration_tests/chat_models/test_openai.py b/libs/langchain/tests/integration_tests/chat_models/test_openai.py
index e1da41c384cb1..8450214abbae8 100644
--- a/libs/langchain/tests/integration_tests/chat_models/test_openai.py
+++ b/libs/langchain/tests/integration_tests/chat_models/test_openai.py
@@ -58,6 +58,7 @@ def test_chat_openai_generate() -> None:
     response = chat.generate([[message], [message]])
     assert isinstance(response, LLMResult)
     assert len(response.generations) == 2
+    assert "system_fingerprint" in response.llm_output
     for generations in response.generations:
         assert len(generations) == 2
         for generation in generations:
@@ -163,6 +164,7 @@ async def test_async_chat_openai() -> None:
     response = await chat.agenerate([[message], [message]])
    assert isinstance(response, LLMResult)
     assert len(response.generations) == 2
+    assert "system_fingerprint" in response.llm_output
     for generations in response.generations:
         assert len(generations) == 2
         for generation in generations:
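
Note: a minimal sketch of reading the surfaced field after this patch, assuming a
langchain build with the change applied and OPENAI_API_KEY set in the environment;
the prompt and max_tokens value below are illustrative, not part of the patch.

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage

    # Illustrative: batch two prompts so _combine_llm_outputs runs on the results.
    chat = ChatOpenAI(max_tokens=10)
    message = HumanMessage(content="Hello")
    response = chat.generate([[message], [message]])

    # With this patch, llm_output carries the first non-None system_fingerprint
    # seen across the batched outputs, alongside token_usage and model_name.
    print(response.llm_output["token_usage"])
    print(response.llm_output.get("system_fingerprint"))

Note the asymmetry in the diff: _create_chat_result always writes the key
(defaulting to ""), while _combine_llm_outputs only includes it when a truthy
value was seen.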