41 changes: 38 additions & 3 deletions litellm/responses/utils.py
@@ -17,6 +17,8 @@
 from litellm._logging import verbose_logger
 from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
 from litellm.types.llms.openai import (
+    InputTokensDetails,
+    OutputTokensDetails,
     ResponseAPIUsage,
     ResponsesAPIOptionalRequestParams,
     ResponsesAPIResponse,
@@ -370,9 +372,42 @@ def _transform_response_api_usage_to_chat_usage(
                 completion_tokens=0,
                 total_tokens=0,
             )
-        response_api_usage: ResponseAPIUsage = (
-            ResponseAPIUsage(**usage) if isinstance(usage, dict) else usage
-        )
+        if isinstance(usage, dict):
+            usage_clean = usage.copy()
+            # Ensure numeric fields default to zero rather than None
+            for numeric_key in ("input_tokens", "output_tokens", "total_tokens"):
+                if usage_clean.get(numeric_key) is None:
+                    usage_clean[numeric_key] = 0
+
+            # Drop detail fields when provider returns None, or clean nested None values
+            for detail_key in ("input_tokens_details", "output_tokens_details"):
+                detail_value = usage_clean.get(detail_key)
+                if detail_value is None:
+                    usage_clean.pop(detail_key, None)
+                elif isinstance(detail_value, dict):
+                    usage_clean[detail_key] = {
+                        k: v for k, v in detail_value.items() if v is not None
+                    }
+
+            response_api_usage: ResponseAPIUsage = ResponseAPIUsage(**usage_clean)
+        else:
+            response_api_usage = usage
+
+        # Normalise token detail fields so they match OpenAI format
+        input_details = response_api_usage.input_tokens_details
+        if input_details is None:
+            input_details = InputTokensDetails(cached_tokens=0)
+        elif input_details.cached_tokens is None:
+            input_details.cached_tokens = 0
+        response_api_usage.input_tokens_details = input_details
+
+        output_details = response_api_usage.output_tokens_details
+        if output_details is None:
+            output_details = OutputTokensDetails(reasoning_tokens=0)
+        elif output_details.reasoning_tokens is None:
+            output_details.reasoning_tokens = 0
+        response_api_usage.output_tokens_details = output_details
+
         prompt_tokens: int = response_api_usage.input_tokens or 0
         completion_tokens: int = response_api_usage.output_tokens or 0
         prompt_tokens_details: Optional[PromptTokensDetails] = None
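For readers skimming the diff, the core of the change is the None-scrubbing applied to the raw usage dict before it is handed to `ResponseAPIUsage`. The standalone sketch below is not litellm code; `scrub_usage` is a hypothetical helper introduced only for illustration, and it simply reproduces the cleaning step on a plain dict so the behaviour can be checked in isolation.

```python
from typing import Any, Dict


def scrub_usage(usage: Dict[str, Any]) -> Dict[str, Any]:
    """Mirror the cleaning this PR applies before building ResponseAPIUsage."""
    cleaned = usage.copy()
    # Numeric token counts fall back to 0 instead of None
    for key in ("input_tokens", "output_tokens", "total_tokens"):
        if cleaned.get(key) is None:
            cleaned[key] = 0
    # Detail sub-objects: drop when None, otherwise strip None-valued entries
    for key in ("input_tokens_details", "output_tokens_details"):
        value = cleaned.get(key)
        if value is None:
            cleaned.pop(key, None)
        elif isinstance(value, dict):
            cleaned[key] = {k: v for k, v in value.items() if v is not None}
    return cleaned


if __name__ == "__main__":
    provider_usage = {
        "input_tokens": None,
        "output_tokens": 20,
        "total_tokens": None,
        "input_tokens_details": None,
        "output_tokens_details": {"reasoning_tokens": None},
    }
    print(scrub_usage(provider_usage))
    # -> {'input_tokens': 0, 'output_tokens': 20, 'total_tokens': 0,
    #     'output_tokens_details': {}}
```

The dropped or emptied detail fields are then rebuilt by the normalisation block above, which substitutes `InputTokensDetails(cached_tokens=0)` and `OutputTokensDetails(reasoning_tokens=0)` so downstream code always sees OpenAI-shaped usage.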
9 changes: 6 additions & 3 deletions tests/test_litellm/responses/test_responses_utils.py
@@ -188,10 +188,11 @@ def test_transform_response_api_usage_with_none_values(self):
"""Test transformation handles None values properly"""
# Setup
usage = {
"input_tokens": 0, # Changed from None to 0
"input_tokens": None,
"output_tokens": 20,
"total_tokens": 20,
"output_tokens_details": {"reasoning_tokens": 5},
"total_tokens": None,
"input_tokens_details": None,
"output_tokens_details": {"reasoning_tokens": None},
}

# Execute
@@ -203,3 +204,5 @@ def test_transform_response_api_usage_with_none_values(self):
         assert result.prompt_tokens == 0
         assert result.completion_tokens == 20
         assert result.total_tokens == 20
+        assert result.prompt_tokens_details is not None
+        assert result.prompt_tokens_details.cached_tokens == 0
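One assertion worth noting: the input now passes `total_tokens: None`, yet the test still expects `result.total_tokens == 20`. That only holds if the transform derives the total from the normalised prompt and completion counts rather than trusting the provider value. The tiny sketch below spells out that arithmetic under this assumption; the field values mirror the assertions above, and nothing else is taken from litellm internals.

```python
# Assumed recomputation, inferred from the test's expectations (not shown in the diff).
prompt_tokens = 0        # input_tokens was None, normalised to 0
completion_tokens = 20   # output_tokens from the provider payload
total_tokens = prompt_tokens + completion_tokens
assert total_tokens == 20  # matches `assert result.total_tokens == 20` above
```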