
Commit 8f3105f

Update test mock objects to include cache info.

1 parent 119a43d

1 file changed: +5 −0 lines

tests/llm/test_response_api.py

@@ -56,6 +56,7 @@ def create_mock_openai_chat_completion(
     # or if get_tokens_counts_from_response had different fallback logic.
     completion.usage.prompt_tokens = prompt_tokens
     completion.usage.completion_tokens = completion_tokens
+    completion.usage.prompt_tokens_details.cached_tokens = 0
 
     completion.model_dump.return_value = {
         "id": "chatcmpl-xxxx",
@@ -69,6 +70,7 @@ def create_mock_openai_chat_completion(
             "output_tokens": completion_tokens,  # Generic name
             "prompt_tokens": prompt_tokens,  # OpenAI specific
             "completion_tokens": completion_tokens,  # OpenAI specific
+            "prompt_tokens_details": {"cached_tokens": 0},
         },
     }
     message.to_dict.return_value = {
@@ -166,6 +168,8 @@ def create_mock_anthropic_response(
     response.usage = MagicMock()
     response.usage.input_tokens = input_tokens
     response.usage.output_tokens = output_tokens
+    response.usage.cache_input_tokens = 0
+    response.usage.cache_creation_input_tokens = 0
     return response
 
 
@@ -207,6 +211,7 @@ def create_mock_openai_responses_api_response(
     response_mock.usage.output_tokens = output_tokens
     response_mock.usage.prompt_tokens = input_tokens
     response_mock.usage.completion_tokens = output_tokens
+    response_mock.usage.input_tokens_details.cached_tokens = 0
 
     return response_mock
 