From 183daa6e6fe11880728842ef2b54cc3c44e6f564 Mon Sep 17 00:00:00 2001 From: Sandeep Banerjee Date: Sat, 10 Feb 2024 07:30:24 +0530 Subject: [PATCH] google-genai[patch]: on_llm_new_token fix (#16924) ### This pull request makes the following changes: * Fixed issue #16913: fixed the Google GenAI chat_models.py code to make sure that the callback is called before the token is yielded. --------- Co-authored-by: Erick Friis --- .../partners/google-genai/langchain_google_genai/chat_models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libs/partners/google-genai/langchain_google_genai/chat_models.py b/libs/partners/google-genai/langchain_google_genai/chat_models.py index 420dbcfd13595..496f54add5513 100644 --- a/libs/partners/google-genai/langchain_google_genai/chat_models.py +++ b/libs/partners/google-genai/langchain_google_genai/chat_models.py @@ -598,6 +598,7 @@ def _stream( for chunk in response: _chat_result = _response_to_result(chunk, stream=True) gen = cast(ChatGenerationChunk, _chat_result.generations[0]) + if run_manager: run_manager.on_llm_new_token(gen.text) yield gen @@ -622,6 +623,7 @@ async def _astream( ): _chat_result = _response_to_result(chunk, stream=True) gen = cast(ChatGenerationChunk, _chat_result.generations[0]) + if run_manager: await run_manager.on_llm_new_token(gen.text) yield gen