From 72606b73bf8d5370660a0830def029ae58f26ace Mon Sep 17 00:00:00 2001
From: William FH <13333726+hinthornw@users.noreply.github.com>
Date: Sun, 17 Dec 2023 17:59:27 -0800
Subject: [PATCH] community: Add logprobs in gen output (#14826)

Now that it's supported again for OAI chat models. A shame this doesn't
include it in the `.invoke()` output, though (it's not included in the
message itself); a follow-up would be needed for that to be the case.
---
 libs/community/langchain_community/chat_models/openai.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libs/community/langchain_community/chat_models/openai.py b/libs/community/langchain_community/chat_models/openai.py
index 7026624c1b067..acbcc943f0dec 100644
--- a/libs/community/langchain_community/chat_models/openai.py
+++ b/libs/community/langchain_community/chat_models/openai.py
@@ -454,9 +454,12 @@ def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
             response = response.dict()
         for res in response["choices"]:
             message = convert_dict_to_message(res["message"])
+            generation_info = dict(finish_reason=res.get("finish_reason"))
+            if "logprobs" in res:
+                generation_info["logprobs"] = res["logprobs"]
             gen = ChatGeneration(
                 message=message,
-                generation_info=dict(finish_reason=res.get("finish_reason")),
+                generation_info=generation_info,
             )
             generations.append(gen)
         token_usage = response.get("usage", {})
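
Usage sketch (editor's note, not part of the patch): after this change, logprobs
can be read back from `generation_info` on each `ChatGeneration` when calling
`generate()`; as the commit message notes, `.invoke()` returns only the message,
which does not carry them. This is a minimal sketch, assuming the OpenAI chat
completions `logprobs` request parameter is forwarded via `model_kwargs`; the
exact invocation shown here is illustrative.

    from langchain_community.chat_models import ChatOpenAI
    from langchain_core.messages import HumanMessage

    # Ask the API to return token logprobs; `model_kwargs` forwards extra
    # request parameters to the OpenAI chat completions endpoint (assumed
    # to accept `logprobs` here).
    chat = ChatOpenAI(model_kwargs={"logprobs": True})

    # `generate()` surfaces per-generation metadata, unlike `.invoke()`.
    result = chat.generate([[HumanMessage(content="Say hello")]])
    gen = result.generations[0][0]
    if gen.generation_info is not None:
        print(gen.generation_info.get("logprobs"))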