diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 7ad8586296005..70fe11fd237b6 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -2202,20 +2202,20 @@ def _create_usage_metadata(oai_token_usage: dict) -> UsageMetadata:
     input_tokens = oai_token_usage.get("prompt_tokens", 0)
     output_tokens = oai_token_usage.get("completion_tokens", 0)
     total_tokens = oai_token_usage.get("total_tokens", input_tokens + output_tokens)
+    prompt_tokens_details = oai_token_usage.get("prompt_tokens_details") or {}
+    completion_tokens_details = oai_token_usage.get("completion_tokens_details") or {}
     input_token_details: dict = {
-        "audio": (oai_token_usage.get("prompt_tokens_details") or {}).get(
-            "audio_tokens"
-        ),
-        "cache_read": (oai_token_usage.get("prompt_tokens_details") or {}).get(
-            "cached_tokens"
-        ),
+        "audio": prompt_tokens_details.get("audio_tokens"),
+        "cache_read": prompt_tokens_details.get("cached_tokens"),
     }
     output_token_details: dict = {
-        "audio": (oai_token_usage.get("completion_tokens_details") or {}).get(
-            "audio_tokens"
+        "audio": completion_tokens_details.get("audio_tokens"),
+        "reasoning": completion_tokens_details.get("reasoning_tokens"),
+        "accepted_prediction": completion_tokens_details.get(
+            "accepted_prediction_tokens"
         ),
-        "reasoning": (oai_token_usage.get("completion_tokens_details") or {}).get(
-            "reasoning_tokens"
+        "rejected_prediction": completion_tokens_details.get(
+            "rejected_prediction_tokens"
         ),
     }
     return UsageMetadata(
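
For reference, a minimal standalone sketch of the mapping after this change. It only reproduces the detail-dict construction shown in the hunk (it does not build the `UsageMetadata` return value), and the token counts in the sample `usage` payload are invented for illustration:

```python
# Illustrative sketch of the refactored detail mapping; sample values are made up.
oai_token_usage = {
    "prompt_tokens": 31,
    "completion_tokens": 12,
    "total_tokens": 43,
    "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 24},
    "completion_tokens_details": {
        "audio_tokens": 0,
        "reasoning_tokens": 0,
        "accepted_prediction_tokens": 5,
        "rejected_prediction_tokens": 3,
    },
}

# Pull each details block once, falling back to {} when absent or None.
prompt_tokens_details = oai_token_usage.get("prompt_tokens_details") or {}
completion_tokens_details = oai_token_usage.get("completion_tokens_details") or {}

input_token_details = {
    "audio": prompt_tokens_details.get("audio_tokens"),
    "cache_read": prompt_tokens_details.get("cached_tokens"),
}
output_token_details = {
    "audio": completion_tokens_details.get("audio_tokens"),
    "reasoning": completion_tokens_details.get("reasoning_tokens"),
    "accepted_prediction": completion_tokens_details.get("accepted_prediction_tokens"),
    "rejected_prediction": completion_tokens_details.get("rejected_prediction_tokens"),
}

print(input_token_details)
# {'audio': 0, 'cache_read': 24}
print(output_token_details)
# {'audio': 0, 'reasoning': 0, 'accepted_prediction': 5, 'rejected_prediction': 3}
```

The hoisted `prompt_tokens_details` / `completion_tokens_details` locals avoid repeating the `or {}` guard for every key, and the two new `accepted_prediction` / `rejected_prediction` entries surface the predicted-output token counts that OpenAI reports under `completion_tokens_details`.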