Commit

[LangChainTracer] Omit Chunk in events / new llm token (#26602)
hinthornw authored Sep 21, 2024
1 parent 3e2cb4e commit 55af6fb
Showing 1 changed file with 21 additions and 0 deletions.
libs/core/langchain_core/tracers/langchain.py: 21 additions & 0 deletions
@@ -23,6 +23,7 @@
 
 from langchain_core.env import get_runtime_environment
 from langchain_core.load import dumpd
+from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
 from langchain_core.tracers.base import BaseTracer
 from langchain_core.tracers.schemas import Run
 
@@ -240,6 +241,26 @@ def _on_llm_start(self, run: Run) -> None:
         run.reference_example_id = self.example_id
         self._persist_run_single(run)
 
+    def _llm_run_with_token_event(
+        self,
+        token: str,
+        run_id: UUID,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
+    ) -> Run:
+        """
+        Append token event to LLM run and return the run.
+        """
+        return super()._llm_run_with_token_event(
+            # Drop the chunk; we don't need to save it
+            token,
+            run_id,
+            chunk=None,
+            parent_run_id=parent_run_id,
+            **kwargs,
+        )
+
     def _on_chat_model_start(self, run: Run) -> None:
         """Persist an LLM run."""
         if run.parent_run_id is None:
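The override does nothing except force chunk=None before delegating to the base tracer's token-event hook, so the token event is still recorded but the chunk payload is never persisted. Below is a minimal, runnable sketch of that pattern. The ToyBaseTracer and ToyLangChainTracer classes and the event-dict shape are hypothetical stand-ins for illustration, not the real BaseTracer / LangChainTracer API:

from typing import Any, Optional
from uuid import UUID, uuid4


class ToyBaseTracer:
    """Records whatever chunk it is handed alongside each token event."""

    def __init__(self) -> None:
        self.events: list[dict[str, Any]] = []

    def _llm_run_with_token_event(
        self,
        token: str,
        run_id: UUID,
        chunk: Optional[Any] = None,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        # The base implementation stores the chunk as part of the event.
        event = {"name": "new_token", "token": token, "chunk": chunk}
        self.events.append(event)
        return event


class ToyLangChainTracer(ToyBaseTracer):
    """Same hook, but the chunk is dropped before it reaches the base class."""

    def _llm_run_with_token_event(
        self,
        token: str,
        run_id: UUID,
        chunk: Optional[Any] = None,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        return super()._llm_run_with_token_event(
            token,
            run_id,
            chunk=None,  # omit the chunk, mirroring the commit above
            parent_run_id=parent_run_id,
            **kwargs,
        )


tracer = ToyLangChainTracer()
tracer._llm_run_with_token_event("hello", uuid4(), chunk={"text": "hello"})
assert tracer.events[0]["token"] == "hello"
assert tracer.events[0]["chunk"] is None  # chunk was omitted from the event

Note that the override keeps the chunk parameter in its signature rather than removing it: callers can still pass a chunk as before, and the base-class call contract is preserved; the chunk is simply discarded instead of saved.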
