Merge pull request #18 from langchain-ai/cost_cb
added cost callback
Showing 7 changed files with 264 additions and 38 deletions.
`langchain_google_vertexai/callbacks.py` (new file, 66 lines added)
```python
import threading
from typing import Any, Dict, List

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult


class VertexAICallbackHandler(BaseCallbackHandler):
    """Callback Handler that tracks VertexAI info."""

    prompt_tokens: int = 0
    prompt_characters: int = 0
    completion_tokens: int = 0
    completion_characters: int = 0
    successful_requests: int = 0

    def __init__(self) -> None:
        super().__init__()
        self._lock = threading.Lock()

    def __repr__(self) -> str:
        return (
            f"\tPrompt tokens: {self.prompt_tokens}\n"
            f"\tPrompt characters: {self.prompt_characters}\n"
            f"\tCompletion tokens: {self.completion_tokens}\n"
            f"\tCompletion characters: {self.completion_characters}\n"
            f"Successful requests: {self.successful_requests}\n"
        )

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return True

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Runs when LLM starts running."""
        pass

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Runs on new LLM token. Only available when streaming is enabled."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Collects token usage."""
        completion_tokens, prompt_tokens = 0, 0
        completion_characters, prompt_characters = 0, 0
        for generations in response.generations:
            if len(generations) > 0 and generations[0].generation_info:
                usage_metadata = generations[0].generation_info.get(
                    "usage_metadata", {}
                )
                completion_tokens += usage_metadata.get("candidates_token_count", 0)
                prompt_tokens += usage_metadata.get("prompt_token_count", 0)
                completion_characters += usage_metadata.get(
                    "candidates_billable_characters", 0
                )
                prompt_characters += usage_metadata.get("prompt_billable_characters", 0)

        with self._lock:
            self.prompt_characters += prompt_characters
            self.prompt_tokens += prompt_tokens
            self.completion_characters += completion_characters
            self.completion_tokens += completion_tokens
            self.successful_requests += 1
```
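A minimal usage sketch (not part of this diff, and assuming Vertex AI credentials are configured in the environment): attach the handler at construction time, then read the accumulated counters after one or more calls.

```python
# Usage sketch for the handler added in this commit; assumes working
# GCP credentials and the package layout shown in this diff.
from langchain_google_vertexai.callbacks import VertexAICallbackHandler
from langchain_google_vertexai.llms import VertexAI

handler = VertexAICallbackHandler()
llm = VertexAI(model_name="gemini-pro", temperature=0.0, callbacks=[handler])

llm.invoke("2+2")
llm.invoke("3+3")

# Counters accumulate across calls; __repr__ prints a summary block.
print(handler)
assert handler.successful_requests == 2
```

The `threading.Lock` matters here: callbacks can fire from multiple threads when chains run concurrently, so the counters are only mutated while the lock is held.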
Integration tests for `VertexAICallbackHandler` (new file, 84 lines added)
```python
import pytest
from langchain_core.messages import HumanMessage

from langchain_google_vertexai.callbacks import VertexAICallbackHandler
from langchain_google_vertexai.chat_models import ChatVertexAI
from langchain_google_vertexai.llms import VertexAI


@pytest.mark.parametrize(
    "model_name",
    ["gemini-pro", "text-bison@001", "code-bison@001"],
)
def test_llm_invoke(model_name: str) -> None:
    vb = VertexAICallbackHandler()
    llm = VertexAI(model_name=model_name, temperature=0.0, callbacks=[vb])
    _ = llm.invoke("2+2")
    assert vb.successful_requests == 1
    assert vb.prompt_tokens > 0
    assert vb.completion_tokens > 0
    prompt_tokens = vb.prompt_tokens
    completion_tokens = vb.completion_tokens
    _ = llm.invoke("2+2")
    assert vb.successful_requests == 2
    assert vb.prompt_tokens > prompt_tokens
    assert vb.completion_tokens > completion_tokens


@pytest.mark.parametrize(
    "model_name",
    ["gemini-pro", "chat-bison@001", "codechat-bison@001"],
)
def test_chat_call(model_name: str) -> None:
    vb = VertexAICallbackHandler()
    llm = ChatVertexAI(model_name=model_name, temperature=0.0, callbacks=[vb])
    message = HumanMessage(content="Hello")
    _ = llm([message])
    assert vb.successful_requests == 1
    assert vb.prompt_tokens > 0
    assert vb.completion_tokens > 0
    prompt_tokens = vb.prompt_tokens
    completion_tokens = vb.completion_tokens
    _ = llm([message])
    assert vb.successful_requests == 2
    assert vb.prompt_tokens > prompt_tokens
    assert vb.completion_tokens > completion_tokens


@pytest.mark.parametrize(
    "model_name",
    ["gemini-pro", "text-bison@001", "code-bison@001"],
)
def test_invoke_config(model_name: str) -> None:
    vb = VertexAICallbackHandler()
    llm = VertexAI(model_name=model_name, temperature=0.0)
    llm.invoke("2+2", config={"callbacks": [vb]})
    assert vb.successful_requests == 1
    assert vb.prompt_tokens > 0
    assert vb.completion_tokens > 0
    prompt_tokens = vb.prompt_tokens
    completion_tokens = vb.completion_tokens
    llm.invoke("2+2", config={"callbacks": [vb]})
    assert vb.successful_requests == 2
    assert vb.prompt_tokens > prompt_tokens
    assert vb.completion_tokens > completion_tokens


def test_llm_stream() -> None:
    vb = VertexAICallbackHandler()
    llm = VertexAI(model_name="gemini-pro", temperature=0.0, callbacks=[vb])
    for _ in llm.stream("2+2"):
        pass
    assert vb.successful_requests == 1
    assert vb.prompt_tokens > 0
    assert vb.completion_tokens > 0


def test_chat_stream() -> None:
    vb = VertexAICallbackHandler()
    llm = ChatVertexAI(model_name="gemini-pro", temperature=0.0, callbacks=[vb])
    for _ in llm.stream("2+2"):
        pass
    assert vb.successful_requests == 1
    assert vb.prompt_tokens > 0
    assert vb.completion_tokens > 0
```
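The branch name (`cost_cb`) and the commit message point at cost tracking, but the handler itself only accumulates token and character counts; turning those into a dollar figure is left to the caller. A sketch of that last step is below, with placeholder rates: `PRICE_PER_1K_CHARS_INPUT` and `PRICE_PER_1K_CHARS_OUTPUT` are made-up numbers, not real Vertex AI prices.

```python
# Hypothetical cost estimate built on top of the handler's counters.
# The rates below are PLACEHOLDERS, not real Vertex AI pricing; legacy
# PaLM models bill per 1k characters, which is why the handler tracks
# the *_billable_characters fields from usage_metadata.
from langchain_google_vertexai.callbacks import VertexAICallbackHandler

PRICE_PER_1K_CHARS_INPUT = 0.00025   # placeholder rate
PRICE_PER_1K_CHARS_OUTPUT = 0.0005   # placeholder rate


def estimate_cost(handler: VertexAICallbackHandler) -> float:
    """Rough cost estimate from the billable character counters."""
    return (
        handler.prompt_characters / 1000 * PRICE_PER_1K_CHARS_INPUT
        + handler.completion_characters / 1000 * PRICE_PER_1K_CHARS_OUTPUT
    )
```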