Skip to content

Commit 02f9c7f

Browse files
committed
feat: update current tests
1 parent e26abd6 commit 02f9c7f

File tree

3 files changed

+32
-5
lines changed

3 files changed

+32
-5
lines changed

tests/models/test_litellm_chatcompletions_stream.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ async def test_stream_response_yields_events_for_text_content(monkeypatch) -> No
5555
prompt_tokens=7,
5656
total_tokens=12,
5757
completion_tokens_details=CompletionTokensDetails(reasoning_tokens=2),
58-
prompt_tokens_details=PromptTokensDetails(cached_tokens=5),
58+
prompt_tokens_details=PromptTokensDetails(cached_tokens=6),
5959
),
6060
)
6161

@@ -122,6 +122,8 @@ async def patched_fetch_response(self, *args, **kwargs):
122122
assert completed_resp.usage.input_tokens == 7
123123
assert completed_resp.usage.output_tokens == 5
124124
assert completed_resp.usage.total_tokens == 12
125+
assert completed_resp.usage.input_tokens_details.cached_tokens == 6
126+
assert completed_resp.usage.output_tokens_details.reasoning_tokens == 2
125127

126128

127129
@pytest.mark.allow_call_model_methods

tests/test_openai_chatcompletions.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,10 @@
1313
ChatCompletionMessageToolCall,
1414
Function,
1515
)
16-
from openai.types.completion_usage import CompletionUsage
16+
from openai.types.completion_usage import (
17+
CompletionUsage,
18+
PromptTokensDetails,
19+
)
1720
from openai.types.responses import (
1821
Response,
1922
ResponseFunctionToolCall,
@@ -51,7 +54,13 @@ async def test_get_response_with_text_message(monkeypatch) -> None:
5154
model="fake",
5255
object="chat.completion",
5356
choices=[choice],
54-
usage=CompletionUsage(completion_tokens=5, prompt_tokens=7, total_tokens=12),
57+
usage=CompletionUsage(
58+
completion_tokens=5,
59+
prompt_tokens=7,
60+
total_tokens=12,
61+
# completion_tokens_details left blank to test default
62+
prompt_tokens_details=PromptTokensDetails(cached_tokens=3),
63+
),
5564
)
5665

5766
async def patched_fetch_response(self, *args, **kwargs):
@@ -81,6 +90,8 @@ async def patched_fetch_response(self, *args, **kwargs):
8190
assert resp.usage.input_tokens == 7
8291
assert resp.usage.output_tokens == 5
8392
assert resp.usage.total_tokens == 12
93+
assert resp.usage.input_tokens_details.cached_tokens == 3
94+
assert resp.usage.output_tokens_details.reasoning_tokens == 0
8495
assert resp.response_id is None
8596

8697

@@ -127,6 +138,8 @@ async def patched_fetch_response(self, *args, **kwargs):
127138
assert resp.usage.requests == 0
128139
assert resp.usage.input_tokens == 0
129140
assert resp.usage.output_tokens == 0
141+
assert resp.usage.input_tokens_details.cached_tokens == 0
142+
assert resp.usage.output_tokens_details.reasoning_tokens == 0
130143

131144

132145
@pytest.mark.allow_call_model_methods

tests/test_openai_chatcompletions_stream.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,11 @@
88
ChoiceDeltaToolCall,
99
ChoiceDeltaToolCallFunction,
1010
)
11-
from openai.types.completion_usage import CompletionUsage
11+
from openai.types.completion_usage import (
12+
CompletionTokensDetails,
13+
CompletionUsage,
14+
PromptTokensDetails,
15+
)
1216
from openai.types.responses import (
1317
Response,
1418
ResponseFunctionToolCall,
@@ -46,7 +50,13 @@ async def test_stream_response_yields_events_for_text_content(monkeypatch) -> No
4650
model="fake",
4751
object="chat.completion.chunk",
4852
choices=[Choice(index=0, delta=ChoiceDelta(content="llo"))],
49-
usage=CompletionUsage(completion_tokens=5, prompt_tokens=7, total_tokens=12),
53+
usage=CompletionUsage(
54+
completion_tokens=5,
55+
prompt_tokens=7,
56+
total_tokens=12,
57+
prompt_tokens_details=PromptTokensDetails(cached_tokens=2),
58+
completion_tokens_details=CompletionTokensDetails(reasoning_tokens=3),
59+
),
5060
)
5161

5262
async def fake_stream() -> AsyncIterator[ChatCompletionChunk]:
@@ -112,6 +122,8 @@ async def patched_fetch_response(self, *args, **kwargs):
112122
assert completed_resp.usage.input_tokens == 7
113123
assert completed_resp.usage.output_tokens == 5
114124
assert completed_resp.usage.total_tokens == 12
125+
assert completed_resp.usage.input_tokens_details.cached_tokens == 2
126+
assert completed_resp.usage.output_tokens_details.reasoning_tokens == 3
115127

116128

117129
@pytest.mark.allow_call_model_methods

0 commit comments

Comments (0)