From 55af6fbd022e51ae7d2751e792c5934ca903e13e Mon Sep 17 00:00:00 2001
From: William FH <13333726+hinthornw@users.noreply.github.com>
Date: Fri, 20 Sep 2024 17:10:34 -0700
Subject: [PATCH] [LangChainTracer] Omit Chunk (#26602) in events / new llm
 token

---
 libs/core/langchain_core/tracers/langchain.py | 21 +++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py
index 5ac1f6496f57f..73aebab0480c9 100644
--- a/libs/core/langchain_core/tracers/langchain.py
+++ b/libs/core/langchain_core/tracers/langchain.py
@@ -23,6 +23,7 @@

 from langchain_core.env import get_runtime_environment
 from langchain_core.load import dumpd
+from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
 from langchain_core.tracers.base import BaseTracer
 from langchain_core.tracers.schemas import Run

@@ -240,6 +241,26 @@ def _on_llm_start(self, run: Run) -> None:
         run.reference_example_id = self.example_id
         self._persist_run_single(run)

+    def _llm_run_with_token_event(
+        self,
+        token: str,
+        run_id: UUID,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
+    ) -> Run:
+        """
+        Append token event to LLM run and return the run.
+        """
+        return super()._llm_run_with_token_event(
+            # Drop the chunk; we don't need to save it
+            token,
+            run_id,
+            chunk=None,
+            parent_run_id=parent_run_id,
+            **kwargs,
+        )
+
     def _on_chat_model_start(self, run: Run) -> None:
         """Persist an LLM run."""
         if run.parent_run_id is None:
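
Note: the override above still records one token event per streamed token, but forces chunk=None before delegating to BaseTracer._llm_run_with_token_event, so the per-token GenerationChunk/ChatGenerationChunk objects are never serialized into the persisted run. Below is a minimal, self-contained sketch of the same override pattern; BaseTokenTracer, SlimTokenTracer, and on_token are hypothetical stand-ins for illustration, not LangChain's actual classes or event schema.

    from typing import Any, Optional
    from uuid import UUID, uuid4

    class BaseTokenTracer:
        """Illustrative stand-in for a base tracer: records one event per streamed token."""

        def __init__(self) -> None:
            self.events: list[dict[str, Any]] = []

        def on_token(self, token: str, run_id: UUID, chunk: Optional[Any] = None) -> None:
            # The base implementation persists whatever chunk it is handed.
            self.events.append({"token": token, "run_id": run_id, "chunk": chunk})

    class SlimTokenTracer(BaseTokenTracer):
        """Mirrors the patch: delegate to the parent but drop the chunk."""

        def on_token(self, token: str, run_id: UUID, chunk: Optional[Any] = None) -> None:
            # Forward everything except the chunk. The token text is still kept
            # in the event, and the full generation is captured when the run
            # ends, so saving each chunk object would only bloat the payload.
            super().on_token(token, run_id, chunk=None)

    tracer = SlimTokenTracer()
    tracer.on_token("Hello", uuid4(), chunk={"text": "Hello"})
    assert tracer.events[0]["token"] == "Hello"  # token kept
    assert tracer.events[0]["chunk"] is None     # chunk dropped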