@@ -17,6 +17,8 @@
 from litellm._logging import verbose_logger
 from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
 from litellm.types.llms.openai import (
+    InputTokensDetails,
+    OutputTokensDetails,
     ResponseAPIUsage,
     ResponsesAPIOptionalRequestParams,
     ResponsesAPIResponse,
@@ -370,9 +372,42 @@ def _transform_response_api_usage_to_chat_usage(
         completion_tokens=0,
         total_tokens=0,
     )
-    response_api_usage: ResponseAPIUsage = (
-        ResponseAPIUsage(**usage) if isinstance(usage, dict) else usage
-    )
+    if isinstance(usage, dict):
+        usage_clean = usage.copy()
+        # Ensure numeric fields default to zero rather than None
+        for numeric_key in ("input_tokens", "output_tokens", "total_tokens"):
+            if usage_clean.get(numeric_key) is None:
+                usage_clean[numeric_key] = 0
+
+        # Drop detail fields when provider returns None, or clean nested None values
+        for detail_key in ("input_tokens_details", "output_tokens_details"):
+            detail_value = usage_clean.get(detail_key)
+            if detail_value is None:
+                usage_clean.pop(detail_key, None)
+            elif isinstance(detail_value, dict):
+                usage_clean[detail_key] = {
+                    k: v for k, v in detail_value.items() if v is not None
+                }
+
+        response_api_usage: ResponseAPIUsage = ResponseAPIUsage(**usage_clean)
+    else:
+        response_api_usage = usage
+
+    # Normalise token detail fields so they match OpenAI format
+    input_details = response_api_usage.input_tokens_details
+    if input_details is None:
+        input_details = InputTokensDetails(cached_tokens=0)
+    elif input_details.cached_tokens is None:
+        input_details.cached_tokens = 0
+    response_api_usage.input_tokens_details = input_details
+
+    output_details = response_api_usage.output_tokens_details
+    if output_details is None:
+        output_details = OutputTokensDetails(reasoning_tokens=0)
+    elif output_details.reasoning_tokens is None:
+        output_details.reasoning_tokens = 0
+    response_api_usage.output_tokens_details = output_details
+
     prompt_tokens: int = response_api_usage.input_tokens or 0
     completion_tokens: int = response_api_usage.output_tokens or 0
     prompt_tokens_details: Optional[PromptTokensDetails] = None
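For review convenience, below is a minimal standalone sketch of the None-handling this patch introduces. Plain dicts stand in for litellm's ResponseAPIUsage / InputTokensDetails / OutputTokensDetails Pydantic models, and the clean_usage_dict helper name is hypothetical, not part of the PR:

# Hypothetical sketch of the patch's usage normalisation, under the
# assumption that plain dicts behave like the Pydantic models here.

def clean_usage_dict(usage: dict) -> dict:
    """Zero out None token counts, drop or strip None detail fields."""
    cleaned = usage.copy()
    # Numeric fields default to zero rather than None
    for numeric_key in ("input_tokens", "output_tokens", "total_tokens"):
        if cleaned.get(numeric_key) is None:
            cleaned[numeric_key] = 0
    # Detail fields: drop when None, strip nested None values otherwise
    for detail_key in ("input_tokens_details", "output_tokens_details"):
        detail = cleaned.get(detail_key)
        if detail is None:
            cleaned.pop(detail_key, None)
        elif isinstance(detail, dict):
            cleaned[detail_key] = {k: v for k, v in detail.items() if v is not None}
    # Mimic the post-construction normalisation in the patch: detail fields
    # are always present, with cached_tokens / reasoning_tokens defaulting to 0
    cleaned.setdefault("input_tokens_details", {}).setdefault("cached_tokens", 0)
    cleaned.setdefault("output_tokens_details", {}).setdefault("reasoning_tokens", 0)
    return cleaned

# A provider payload with the None values the patch guards against:
raw = {
    "input_tokens": 10,
    "output_tokens": None,
    "total_tokens": None,
    "input_tokens_details": {"cached_tokens": None},
    "output_tokens_details": None,
}
assert clean_usage_dict(raw) == {
    "input_tokens": 10,
    "output_tokens": 0,
    "total_tokens": 0,
    "input_tokens_details": {"cached_tokens": 0},
    "output_tokens_details": {"reasoning_tokens": 0},
}

The design choice worth noting: None detail fields are popped before ResponseAPIUsage(**usage_clean) is constructed (so Pydantic validation cannot fail on them), and the details are then re-filled with zeroed defaults afterwards, so downstream consumers always see OpenAI-shaped cached_tokens / reasoning_tokens fields.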