Skip to content

Commit 6cc68f4

Browse files
committed
feat: updated tests
1 parent 361ec20 commit 6cc68f4

File tree

4 files changed: 48 additions, 12 deletions

tests/models/test_litellm_chatcompletions_stream.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,11 @@
88
ChoiceDeltaToolCall,
99
ChoiceDeltaToolCallFunction,
1010
)
11-
from openai.types.completion_usage import CompletionUsage
11+
from openai.types.completion_usage import (
12+
CompletionTokensDetails,
13+
CompletionUsage,
14+
PromptTokensDetails,
15+
)
1216
from openai.types.responses import (
1317
Response,
1418
ResponseFunctionToolCall,
@@ -46,7 +50,13 @@ async def test_stream_response_yields_events_for_text_content(monkeypatch) -> No
4650
model="fake",
4751
object="chat.completion.chunk",
4852
choices=[Choice(index=0, delta=ChoiceDelta(content="llo"))],
49-
usage=CompletionUsage(completion_tokens=5, prompt_tokens=7, total_tokens=12),
53+
usage=CompletionUsage(
54+
completion_tokens=5,
55+
prompt_tokens=7,
56+
total_tokens=12,
57+
completion_tokens_details=CompletionTokensDetails(reasoning_tokens=2),
58+
prompt_tokens_details=PromptTokensDetails(cached_tokens=5),
59+
),
5060
)
5161

5262
async def fake_stream() -> AsyncIterator[ChatCompletionChunk]:

tests/models/test_litellm_extra_body.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,12 +22,14 @@ async def fake_acompletion(model, messages=None, **kwargs):
2222
captured.update(kwargs)
2323
msg = Message(role="assistant", content="ok")
2424
choice = Choices(index=0, message=msg)
25-
return ModelResponse(choices=[choice], usage=Usage(0, 0, 0))
25+
return ModelResponse(
26+
choices=[choice],
27+
usage=Usage(0, 0, 0),
28+
)
2629

2730
monkeypatch.setattr(litellm, "acompletion", fake_acompletion)
2831
settings = ModelSettings(
29-
temperature=0.1,
30-
extra_body={"cached_content": "some_cache", "foo": 123}
32+
temperature=0.1, extra_body={"cached_content": "some_cache", "foo": 123}
3133
)
3234
model = LitellmModel(model="test-model")
3335

tests/test_extra_headers.py

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import pytest
22
from openai.types.chat.chat_completion import ChatCompletion, Choice
33
from openai.types.chat.chat_completion_message import ChatCompletionMessage
4+
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
45

56
from agents import ModelSettings, ModelTracing, OpenAIChatCompletionsModel, OpenAIResponsesModel
67

@@ -17,21 +18,29 @@ class DummyResponses:
1718
async def create(self, **kwargs):
1819
nonlocal called_kwargs
1920
called_kwargs = kwargs
21+
2022
class DummyResponse:
2123
id = "dummy"
2224
output = []
2325
usage = type(
24-
"Usage", (), {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
26+
"Usage",
27+
(),
28+
{
29+
"input_tokens": 0,
30+
"output_tokens": 0,
31+
"total_tokens": 0,
32+
"input_tokens_details": InputTokensDetails(cached_tokens=0),
33+
"output_tokens_details": OutputTokensDetails(reasoning_tokens=0),
34+
},
2535
)()
36+
2637
return DummyResponse()
2738

2839
class DummyClient:
2940
def __init__(self):
3041
self.responses = DummyResponses()
3142

32-
33-
34-
model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyClient()) # type: ignore
43+
model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyClient()) # type: ignore
3544
extra_headers = {"X-Test-Header": "test-value"}
3645
await model.get_response(
3746
system_instructions=None,
@@ -47,7 +56,6 @@ def __init__(self):
4756
assert called_kwargs["extra_headers"]["X-Test-Header"] == "test-value"
4857

4958

50-
5159
@pytest.mark.allow_call_model_methods
5260
@pytest.mark.asyncio
5361
async def test_extra_headers_passed_to_openai_client():
@@ -76,7 +84,7 @@ def __init__(self):
7684
self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
7785
self.base_url = "https://api.openai.com"
7886

79-
model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient()) # type: ignore
87+
model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient()) # type: ignore
8088
extra_headers = {"X-Test-Header": "test-value"}
8189
await model.get_response(
8290
system_instructions=None,

tests/test_responses_tracing.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from inline_snapshot import snapshot
33
from openai import AsyncOpenAI
44
from openai.types.responses import ResponseCompletedEvent
5+
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
56

67
from agents import ModelSettings, ModelTracing, OpenAIResponsesModel, trace
78
from agents.tracing.span_data import ResponseSpanData
@@ -16,10 +17,25 @@ def is_disabled(self):
1617

1718

1819
class DummyUsage:
19-
def __init__(self, input_tokens=1, output_tokens=1, total_tokens=2):
20+
def __init__(
21+
self,
22+
input_tokens: int = 1,
23+
input_tokens_details: InputTokensDetails | None = None,
24+
output_tokens: int = 1,
25+
output_tokens_details: OutputTokensDetails | None = None,
26+
total_tokens: int = 2,
27+
):
2028
self.input_tokens = input_tokens
2129
self.output_tokens = output_tokens
2230
self.total_tokens = total_tokens
31+
self.input_tokens_details = (
32+
input_tokens_details if input_tokens_details else InputTokensDetails(cached_tokens=0)
33+
)
34+
self.output_tokens_details = (
35+
output_tokens_details
36+
if output_tokens_details
37+
else OutputTokensDetails(reasoning_tokens=0)
38+
)
2339

2440

2541
class DummyResponse:

0 commit comments

Comments (0)