Commit 8273630

test: update tests
1 parent 0e747aa · commit 8273630

File tree: 1 file changed (+50 −55 lines)

tests/test_litellm/llms/anthropic/chat/test_anthropic_chat_transformation.py

Lines changed: 50 additions & 55 deletions
```diff
@@ -51,57 +51,56 @@ def test_calculate_usage():
         "output_tokens": 550,
     }
     usage = config.calculate_usage(usage_object=usage_object, reasoning_content=None)
-    assert usage.prompt_tokens == 3
+    assert usage.prompt_tokens == 12307
     assert usage.completion_tokens == 550
-    assert usage.total_tokens == 3 + 550
+    assert usage.total_tokens == 12307 + 550
     assert usage.prompt_tokens_details.cached_tokens == 0
+    assert usage.prompt_tokens_details.cache_creation_tokens == 12304
     assert usage._cache_creation_input_tokens == 12304
     assert usage._cache_read_input_tokens == 0

-@pytest.mark.parametrize("usage_object,expected_usage", [
-    [
-        {
-            "cache_creation_input_tokens": None,
-            "cache_read_input_tokens": None,
-            "input_tokens": None,
-            "output_tokens": 43,
-            "server_tool_use": None
-        },
-        {
-            "prompt_tokens": 0,
-            "completion_tokens": 43,
-            "total_tokens": 43,
-            "_cache_creation_input_tokens": 0,
-            "_cache_read_input_tokens": 0
-        }
-    ],
+
+@pytest.mark.parametrize(
+    "usage_object,expected_usage",
     [
-        {
-            "cache_creation_input_tokens": 100,
-            "cache_read_input_tokens": 200,
-            "input_tokens": 1,
-            "output_tokens": None,
-            "server_tool_use": None
-        },
-        {
-            "prompt_tokens": 1 + 200,
-            "completion_tokens": 0,
-            "total_tokens": 1 + 200,
-            "_cache_creation_input_tokens": 100,
-            "_cache_read_input_tokens": 200,
-        }
+        [
+            {
+                "cache_creation_input_tokens": None,
+                "cache_read_input_tokens": None,
+                "input_tokens": None,
+                "output_tokens": 43,
+                "server_tool_use": None,
+            },
+            {
+                "prompt_tokens": 0,
+                "completion_tokens": 43,
+                "total_tokens": 43,
+                "_cache_creation_input_tokens": 0,
+                "_cache_read_input_tokens": 0,
+            },
+        ],
+        [
+            {
+                "cache_creation_input_tokens": 100,
+                "cache_read_input_tokens": 200,
+                "input_tokens": 1,
+                "output_tokens": None,
+                "server_tool_use": None,
+            },
+            {
+                "prompt_tokens": 1 + 200 + 100,
+                "completion_tokens": 0,
+                "total_tokens": 1 + 200 + 100,
+                "_cache_creation_input_tokens": 100,
+                "_cache_read_input_tokens": 200,
+            },
+        ],
+        [
+            {"server_tool_use": {"web_search_requests": 10}},
+            {"server_tool_use": ServerToolUse(web_search_requests=10)},
+        ],
     ],
-    [
-        {
-            "server_tool_use": {
-                "web_search_requests": 10
-            }
-        },
-        {
-            "server_tool_use": ServerToolUse(web_search_requests=10)
-        }
-    ]
-])
+)
 def test_calculate_usage_nulls(usage_object, expected_usage):
     """
     Correctly deal with null values in usage object
@@ -115,27 +114,23 @@ def test_calculate_usage_nulls(usage_object, expected_usage):
         assert hasattr(usage, k)
         assert getattr(usage, k) == v

-@pytest.mark.parametrize("usage_object", [
-    {
-        "server_tool_use": {
-            "web_search_requests": None
-        }
-    },
-    {
-        "server_tool_use": None
-    }
-])
+
+@pytest.mark.parametrize(
+    "usage_object",
+    [{"server_tool_use": {"web_search_requests": None}}, {"server_tool_use": None}],
+)
 def test_calculate_usage_server_tool_null(usage_object):
     """
     Correctly deal with null values in usage object

     Fixes https://github.com/BerriAI/litellm/issues/11920
     """
     config = AnthropicConfig()
-
+
     usage = config.calculate_usage(usage_object=usage_object, reasoning_content=None)
     assert not hasattr(usage, "server_tool_use")

+
 def test_extract_response_content_with_citations():
     config = AnthropicConfig()

```
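The updated numbers are internally consistent: `prompt_tokens` now appears to fold `cache_creation_input_tokens` in alongside `input_tokens` and `cache_read_input_tokens` (12307 = 3 + 0 + 12304 in the first test, assuming 3 raw input tokens per the old assertion; 301 = 1 + 200 + 100 in the parametrized case). A minimal sketch of that accounting, with `prompt_tokens` as a hypothetical stand-in rather than LiteLLM's actual `AnthropicConfig.calculate_usage`:

```python
# Hypothetical sketch of the accounting the updated assertions imply;
# not LiteLLM's actual AnthropicConfig.calculate_usage implementation.
def prompt_tokens(usage_object: dict) -> int:
    # Treat missing/None fields as zero, as the null-handling tests expect.
    return (
        (usage_object.get("input_tokens") or 0)
        + (usage_object.get("cache_read_input_tokens") or 0)
        + (usage_object.get("cache_creation_input_tokens") or 0)
    )

# Mirrors the expectations asserted in the diff above.
assert prompt_tokens({"input_tokens": 3, "cache_creation_input_tokens": 12304}) == 12307
assert prompt_tokens(
    {"input_tokens": 1, "cache_read_input_tokens": 200, "cache_creation_input_tokens": 100}
) == 301  # 1 + 200 + 100
assert prompt_tokens({"input_tokens": None, "output_tokens": 43}) == 0
```

Under this reading, `total_tokens` remains `prompt_tokens + completion_tokens`, which matches the updated `12307 + 550` and `1 + 200 + 100` totals in the diff.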