
Commit b7b92d5
include
baskaryan committed Nov 6, 2023
1 parent 8e0cb2e commit b7b92d5
Showing 2 changed files with 14 additions and 2 deletions.
14 changes: 12 additions & 2 deletions libs/langchain/langchain/chat_models/openai.py
@@ -342,6 +342,7 @@ def _completion_with_retry(**kwargs: Any) -> Any:

     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
+        system_fingerprint = None
         for output in llm_outputs:
             if output is None:
                 # Happens in streaming
@@ -352,7 +353,12 @@ def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
                     overall_token_usage[k] += v
                 else:
                     overall_token_usage[k] = v
-        return {"token_usage": overall_token_usage, "model_name": self.model_name}
+            if system_fingerprint is None:
+                system_fingerprint = output.get("system_fingerprint")
+        combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
+        if system_fingerprint:
+            combined["system_fingerprint"] = system_fingerprint
+        return combined

     def _stream(
         self,
@@ -430,7 +436,11 @@ def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
             )
             generations.append(gen)
         token_usage = response.get("usage", {})
-        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
+        llm_output = {
+            "token_usage": token_usage,
+            "model_name": self.model_name,
+            "system_fingerprint": response.get("system_fingerprint", ""),
+        }
         return ChatResult(generations=generations, llm_output=llm_output)

     async def _astream(
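The first two hunks change how _combine_llm_outputs merges the per-batch outputs of a single generate call: token counts are still summed key by key, and the first non-None system_fingerprint encountered is kept for the combined result. A minimal, self-contained sketch of that rule (no OpenAI calls; the free-standing helper name and sample data below are illustrative, not part of the commit):

from typing import List, Optional

def combine_llm_outputs(llm_outputs: List[Optional[dict]]) -> dict:
    # Sum token counts across outputs; keep the first non-None fingerprint.
    overall_token_usage: dict = {}
    system_fingerprint = None
    for output in llm_outputs:
        if output is None:  # happens in streaming
            continue
        for k, v in output["token_usage"].items():
            overall_token_usage[k] = overall_token_usage.get(k, 0) + v
        if system_fingerprint is None:
            system_fingerprint = output.get("system_fingerprint")
    combined = {"token_usage": overall_token_usage}
    if system_fingerprint:
        combined["system_fingerprint"] = system_fingerprint
    return combined

outputs = [
    {"token_usage": {"total_tokens": 10}, "system_fingerprint": "fp_a"},
    {"token_usage": {"total_tokens": 7}, "system_fingerprint": "fp_b"},
]
assert combine_llm_outputs(outputs) == {
    "token_usage": {"total_tokens": 17},
    "system_fingerprint": "fp_a",  # the first fingerprint wins
}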
2 changes: 2 additions & 0 deletions libs/langchain/tests/integration_tests/chat_models/test_openai.py
@@ -58,6 +58,7 @@ def test_chat_openai_generate() -> None:
     response = chat.generate([[message], [message]])
     assert isinstance(response, LLMResult)
     assert len(response.generations) == 2
+    assert "system_fingerprint" in response.llm_output
     for generations in response.generations:
         assert len(generations) == 2
         for generation in generations:
@@ -163,6 +164,7 @@ async def test_async_chat_openai() -> None:
     response = await chat.agenerate([[message], [message]])
     assert isinstance(response, LLMResult)
     assert len(response.generations) == 2
+    assert "system_fingerprint" in response.llm_output
     for generations in response.generations:
         assert len(generations) == 2
         for generation in generations:
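The two new assertions exercise the change end to end: after generate or agenerate, the fingerprint reported by OpenAI should show up in response.llm_output. A rough usage sketch, assuming the langchain API of this era (ChatOpenAI, HumanMessage) and an OPENAI_API_KEY in the environment; whether a non-empty fingerprint comes back depends on the model and the installed OpenAI SDK version:

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

chat = ChatOpenAI(max_tokens=10)
result = chat.generate([[HumanMessage(content="Hello")]])

# After this commit, llm_output carries the fingerprint alongside token usage,
# e.g. {"token_usage": {...}, "model_name": "gpt-3.5-turbo",
#       "system_fingerprint": "fp_..."}
print((result.llm_output or {}).get("system_fingerprint"))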
