Commit 361ec20
feat: implemented tokens_details throughout src
1 parent a08fe82 commit 361ec20

File tree

5 files changed: +67 -13 lines changed

src/agents/extensions/models/litellm_model.py

Lines changed: 11 additions & 0 deletions
@@ -6,6 +6,7 @@
 from typing import Any, Literal, cast, overload

 import litellm.types
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from agents.exceptions import ModelBehaviorError

@@ -107,6 +108,16 @@ async def get_response(
                     input_tokens=response_usage.prompt_tokens,
                     output_tokens=response_usage.completion_tokens,
                     total_tokens=response_usage.total_tokens,
+                    input_tokens_details=InputTokensDetails(
+                        cached_tokens=getattr(
+                            response_usage.prompt_tokens_details, "cached_tokens", 0
+                        )
+                    ),
+                    output_tokens_details=OutputTokensDetails(
+                        reasoning_tokens=getattr(
+                            response_usage.completion_tokens_details, "reasoning_tokens", 0
+                        )
+                    ),
                 )
                 if response.usage
                 else Usage()

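A note on the getattr fallback in this hunk: LiteLLM fronts many providers, and the nested prompt_tokens_details / completion_tokens_details objects are not guaranteed to be present on the usage it returns, so the new fields are read defensively with a default of 0. A minimal sketch of that pattern, using hypothetical stand-in usage objects rather than real LiteLLM responses:

    from types import SimpleNamespace

    # Stand-in for a provider that reports prompt caching details.
    usage_with_details = SimpleNamespace(
        prompt_tokens_details=SimpleNamespace(cached_tokens=128)
    )
    # Stand-in for a provider that omits the details object entirely.
    usage_without_details = SimpleNamespace(prompt_tokens_details=None)

    # getattr falls back to 0 when the attribute is missing on the target object.
    print(getattr(usage_with_details.prompt_tokens_details, "cached_tokens", 0))     # 128
    print(getattr(usage_without_details.prompt_tokens_details, "cached_tokens", 0))  # 0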
src/agents/models/openai_chatcompletions.py

Lines changed: 14 additions & 1 deletion
@@ -9,6 +9,7 @@
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -83,6 +84,18 @@ async def get_response(
                     input_tokens=response.usage.prompt_tokens,
                     output_tokens=response.usage.completion_tokens,
                     total_tokens=response.usage.total_tokens,
+                    input_tokens_details=InputTokensDetails(
+                        cached_tokens=getattr(
+                            response.usage.prompt_tokens_details, "cached_tokens", 0
+                        )
+                        or 0,
+                    ),
+                    output_tokens_details=OutputTokensDetails(
+                        reasoning_tokens=getattr(
+                            response.usage.completion_tokens_details, "reasoning_tokens", 0
+                        )
+                        or 0,
+                    ),
                 )
                 if response.usage
                 else Usage()
@@ -252,7 +265,7 @@ async def _fetch_response(
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            extra_headers={ **HEADERS, **(model_settings.extra_headers or {}) },
+            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),

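The Chat Completions variant layers an extra `or 0` on top of the same getattr lookup. getattr only uses its default when the attribute is missing; if the details object exists but the field is explicitly None, getattr returns that None, and the `or 0` coerces it back to an int before it reaches InputTokensDetails / OutputTokensDetails. A small sketch of the difference, with a hypothetical stand-in object:

    from types import SimpleNamespace

    # Field is present but unset (hypothetical shape, for illustration only).
    details = SimpleNamespace(cached_tokens=None)

    print(getattr(details, "cached_tokens", 0))       # None -- the default is not applied
    print(getattr(details, "cached_tokens", 0) or 0)  # 0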
src/agents/models/openai_responses.py

Lines changed: 2 additions & 0 deletions
@@ -98,6 +98,8 @@ async def get_response(
                     input_tokens=response.usage.input_tokens,
                     output_tokens=response.usage.output_tokens,
                     total_tokens=response.usage.total_tokens,
+                    input_tokens_details=response.usage.input_tokens_details,
+                    output_tokens_details=response.usage.output_tokens_details,
                 )
                 if response.usage
                 else Usage()

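No defensive lookup is needed for the Responses API: its usage object already exposes input_tokens_details and output_tokens_details as typed models whose field names line up with what Usage stores, so they are forwarded unchanged. Roughly, and with made-up token counts, the mapping looks like this (a sketch, assuming the openai ResponseUsage model at this version):

    from openai.types.responses.response_usage import (
        InputTokensDetails,
        OutputTokensDetails,
        ResponseUsage,
    )

    from agents.usage import Usage

    response_usage = ResponseUsage(
        input_tokens=1000,
        input_tokens_details=InputTokensDetails(cached_tokens=256),
        output_tokens=200,
        output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
        total_tokens=1200,
    )

    usage = Usage(
        input_tokens=response_usage.input_tokens,
        output_tokens=response_usage.output_tokens,
        total_tokens=response_usage.total_tokens,
        input_tokens_details=response_usage.input_tokens_details,
        output_tokens_details=response_usage.output_tokens_details,
    )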
src/agents/run.py

Lines changed: 2 additions & 0 deletions
@@ -689,6 +689,8 @@ async def _run_single_turn_streamed(
                         input_tokens=event.response.usage.input_tokens,
                         output_tokens=event.response.usage.output_tokens,
                         total_tokens=event.response.usage.total_tokens,
+                        input_tokens_details=event.response.usage.input_tokens_details,
+                        output_tokens_details=event.response.usage.output_tokens_details,
                     )
                     if event.response.usage
                     else Usage()

src/agents/usage.py

Lines changed: 38 additions & 12 deletions
@@ -1,17 +1,34 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from typing import TypeVar

 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
+from pydantic import BaseModel

-T = TypeVar("T", bound="InputTokensDetails | OutputTokensDetails")
+T = TypeVar("T", bound=BaseModel)


-def add_numeric_fields(current: T, other: T) -> None:
-    for field in current.__dataclass_fields__:
-        v1 = getattr(current, field, 0)
-        v2 = getattr(other, field, 0)
+def add_numeric_fields(current: T, other: T) -> T:
+    """
+    Add numeric fields from other to current.
+    """
+    clone = current.model_copy()
+    for key, v1 in current.model_dump().items():
+        v2 = getattr(other, key, 0)
         if isinstance(v1, (int, float)) and isinstance(v2, (int, float)):
-            setattr(current, field, (v1 or 0) + (v2 or 0))
+            setattr(clone, key, (v1 or 0) + (v2 or 0))
+    return clone
+
+
+def add_input_tokens_details(
+    current: InputTokensDetails, other: InputTokensDetails
+) -> InputTokensDetails:
+    return add_numeric_fields(current, other)
+
+
+def add_output_tokens_details(
+    current: OutputTokensDetails, other: OutputTokensDetails
+) -> OutputTokensDetails:
+    return add_numeric_fields(current, other)


 @dataclass
@@ -22,12 +39,17 @@ class Usage:
     input_tokens: int = 0
     """Total input tokens sent, across all requests."""

-    input_tokens_details: InputTokensDetails = InputTokensDetails(cached_tokens=0)
-
+    input_tokens_details: InputTokensDetails = field(
+        default_factory=lambda: InputTokensDetails(cached_tokens=0)
+    )
+    """Details about the input tokens, matching responses API usage details."""
     output_tokens: int = 0
     """Total output tokens received, across all requests."""

-    output_tokens_details: OutputTokensDetails = OutputTokensDetails(reasoning_tokens=0)
+    output_tokens_details: OutputTokensDetails = field(
+        default_factory=lambda: OutputTokensDetails(reasoning_tokens=0)
+    )
+    """Details about the output tokens, matching responses API usage details."""

     total_tokens: int = 0
     """Total tokens sent and received, across all requests."""
@@ -37,5 +59,9 @@ def add(self, other: "Usage") -> None:
         self.input_tokens += other.input_tokens if other.input_tokens else 0
         self.output_tokens += other.output_tokens if other.output_tokens else 0
         self.total_tokens += other.total_tokens if other.total_tokens else 0
-        add_numeric_fields(self.input_tokens_details, other.input_tokens_details)
-        add_numeric_fields(self.output_tokens_details, other.output_tokens_details)
+        self.input_tokens_details = add_input_tokens_details(
+            self.input_tokens_details, other.input_tokens_details
+        )
+        self.output_tokens_details = add_output_tokens_details(
+            self.output_tokens_details, other.output_tokens_details
+        )

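With the field(default_factory=...) defaults, each Usage instance now gets its own detail objects, and add() reassigns fresh objects built by add_numeric_fields (via model_copy) instead of mutating the pydantic detail models in place. A short sketch of how aggregation behaves after this change, with made-up token counts:

    from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

    from agents.usage import Usage

    total = Usage()

    first_call = Usage(
        input_tokens=1000,
        input_tokens_details=InputTokensDetails(cached_tokens=200),
        output_tokens=300,
        output_tokens_details=OutputTokensDetails(reasoning_tokens=50),
        total_tokens=1300,
    )
    second_call = Usage(
        input_tokens=500,
        input_tokens_details=InputTokensDetails(cached_tokens=500),
        output_tokens=100,
        output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
        total_tokens=600,
    )

    total.add(first_call)
    total.add(second_call)

    print(total.input_tokens)                            # 1500
    print(total.input_tokens_details.cached_tokens)      # 700
    print(total.output_tokens_details.reasoning_tokens)  # 50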