Skip to content

Commit

Permalink
refactor: change metadata fields (#911)
Browse files Browse the repository at this point in the history
* initial import

* formatting

* fixing tests

* removing warnings

* linting issues

* fixes due to conflicts
  • Loading branch information
davidsbatista authored Aug 12, 2024
1 parent a8b2de9 commit 0f1452a
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 14 deletions.
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import json
import logging
import re
import warnings
from typing import Any, Callable, ClassVar, Dict, List, Optional, Type

from botocore.exceptions import ClientError
Expand Down Expand Up @@ -150,12 +149,6 @@ def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
self.stop_words = stop_words or []
self.streaming_callback = streaming_callback

warnings.warn(
"The `meta` output of the AmazonBedrockChatGenerator will change in the next release to be inline with "
"OpenAI `meta`output keys.",
stacklevel=2,
)

@component.output_types(replies=List[ChatMessage])
def run(
self,
Expand Down Expand Up @@ -210,6 +203,12 @@ def run(
msg = f"Could not inference Amazon Bedrock model {self.model} due: {exception}"
raise AmazonBedrockInferenceError(msg) from exception

# rename the meta key to be inline with OpenAI meta output keys
for response in replies:
if response.meta is not None and "usage" in response.meta:
response.meta["usage"]["prompt_tokens"] = response.meta["usage"].pop("input_tokens")
response.meta["usage"]["completion_tokens"] = response.meta["usage"].pop("output_tokens")

return {"replies": replies}

@classmethod
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import dataclasses
import json
import warnings
from typing import Any, Callable, ClassVar, Dict, List, Optional, Union

from haystack import component, default_from_dict, default_to_dict, logging
Expand Down Expand Up @@ -115,12 +114,6 @@ def __init__(
self.client = Anthropic(api_key=self.api_key.resolve_value())
self.ignore_tools_thinking_messages = ignore_tools_thinking_messages

warnings.warn(
"The `meta` output of the AnthropicChatGenerator will change in the next release to be inline with "
"OpenAI `meta`output keys.",
stacklevel=2,
)

def _get_telemetry_data(self) -> Dict[str, Any]:
"""
Data that is sent to Posthog for usage analytics.
Expand Down Expand Up @@ -220,13 +213,20 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str,
# capture stop reason and stop sequence
delta = stream_event
completions = [self._connect_chunks(chunks, start_event, delta)]

# if streaming is disabled, the response is an Anthropic Message
elif isinstance(response, Message):
has_tools_msgs = any(isinstance(content_block, ToolUseBlock) for content_block in response.content)
if has_tools_msgs and self.ignore_tools_thinking_messages:
response.content = [block for block in response.content if isinstance(block, ToolUseBlock)]
completions = [self._build_message(content_block, response) for content_block in response.content]

# rename the meta key to be inline with OpenAI meta output keys
for response in completions:
if response.meta is not None and "usage" in response.meta:
response.meta["usage"]["prompt_tokens"] = response.meta["usage"].pop("input_tokens")
response.meta["usage"]["completion_tokens"] = response.meta["usage"].pop("output_tokens")

return {"replies": completions}

def _build_message(self, content_block: Union[TextBlock, ToolUseBlock], message: Message) -> ChatMessage:
Expand Down

0 comments on commit 0f1452a

Please sign in to comment.