@@ -51,57 +51,56 @@ def test_calculate_usage():
         "output_tokens": 550,
     }
     usage = config.calculate_usage(usage_object=usage_object, reasoning_content=None)
-    assert usage.prompt_tokens == 3
+    assert usage.prompt_tokens == 12307
     assert usage.completion_tokens == 550
-    assert usage.total_tokens == 3 + 550
+    assert usage.total_tokens == 12307 + 550
     assert usage.prompt_tokens_details.cached_tokens == 0
+    assert usage.prompt_tokens_details.cache_creation_tokens == 12304
     assert usage._cache_creation_input_tokens == 12304
     assert usage._cache_read_input_tokens == 0
 
-@pytest.mark.parametrize("usage_object,expected_usage", [
-    [
-        {
-            "cache_creation_input_tokens": None,
-            "cache_read_input_tokens": None,
-            "input_tokens": None,
-            "output_tokens": 43,
-            "server_tool_use": None
-        },
-        {
-            "prompt_tokens": 0,
-            "completion_tokens": 43,
-            "total_tokens": 43,
-            "_cache_creation_input_tokens": 0,
-            "_cache_read_input_tokens": 0
-        }
-    ],
+
+@pytest.mark.parametrize(
+    "usage_object,expected_usage",
     [
-        {
-            "cache_creation_input_tokens": 100,
-            "cache_read_input_tokens": 200,
-            "input_tokens": 1,
-            "output_tokens": None,
-            "server_tool_use": None
-        },
-        {
-            "prompt_tokens": 1 + 200,
-            "completion_tokens": 0,
-            "total_tokens": 1 + 200,
-            "_cache_creation_input_tokens": 100,
-            "_cache_read_input_tokens": 200,
-        }
+        [
+            {
+                "cache_creation_input_tokens": None,
+                "cache_read_input_tokens": None,
+                "input_tokens": None,
+                "output_tokens": 43,
+                "server_tool_use": None,
+            },
+            {
+                "prompt_tokens": 0,
+                "completion_tokens": 43,
+                "total_tokens": 43,
+                "_cache_creation_input_tokens": 0,
+                "_cache_read_input_tokens": 0,
+            },
+        ],
+        [
+            {
+                "cache_creation_input_tokens": 100,
+                "cache_read_input_tokens": 200,
+                "input_tokens": 1,
+                "output_tokens": None,
+                "server_tool_use": None,
+            },
+            {
+                "prompt_tokens": 1 + 200 + 100,
+                "completion_tokens": 0,
+                "total_tokens": 1 + 200 + 100,
+                "_cache_creation_input_tokens": 100,
+                "_cache_read_input_tokens": 200,
+            },
+        ],
+        [
+            {"server_tool_use": {"web_search_requests": 10}},
+            {"server_tool_use": ServerToolUse(web_search_requests=10)},
+        ],
     ],
-    [
-        {
-            "server_tool_use": {
-                "web_search_requests": 10
-            }
-        },
-        {
-            "server_tool_use": ServerToolUse(web_search_requests=10)
-        }
-    ]
-])
+)
 def test_calculate_usage_nulls(usage_object, expected_usage):
     """
     Correctly deal with null values in usage object
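The updated assertions change how prompt tokens are aggregated: cache-creation tokens are now counted toward prompt_tokens alongside direct input tokens and cache-read tokens (the first test moves from expecting 3 to 12307, i.e. the 12304 cache-creation tokens are folded in; the parametrized case moves from 1 + 200 to 1 + 200 + 100). A minimal, self-contained sketch of that arithmetic follows; the helper and dataclass names are hypothetical and this is not litellm's actual AnthropicConfig.calculate_usage implementation.

    # Hypothetical sketch of the aggregation the new assertions imply.
    from dataclasses import dataclass


    @dataclass
    class SketchUsage:
        prompt_tokens: int
        completion_tokens: int
        total_tokens: int


    def aggregate_usage_sketch(usage_object: dict) -> SketchUsage:
        """Treat missing/None counters as 0 and fold cache tokens into the prompt count."""
        input_tokens = usage_object.get("input_tokens") or 0
        cache_read = usage_object.get("cache_read_input_tokens") or 0
        cache_creation = usage_object.get("cache_creation_input_tokens") or 0
        output_tokens = usage_object.get("output_tokens") or 0

        # Per the updated assertions, prompt_tokens covers direct input,
        # cache reads, and cache creation.
        prompt_tokens = input_tokens + cache_read + cache_creation
        return SketchUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=output_tokens,
            total_tokens=prompt_tokens + output_tokens,
        )


    # Mirrors the second parametrized case: 1 + 200 + 100 prompt tokens.
    assert aggregate_usage_sketch(
        {
            "cache_creation_input_tokens": 100,
            "cache_read_input_tokens": 200,
            "input_tokens": 1,
            "output_tokens": None,
        }
    ).prompt_tokens == 301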
@@ -115,27 +114,23 @@ def test_calculate_usage_nulls(usage_object, expected_usage):
         assert hasattr(usage, k)
         assert getattr(usage, k) == v
 
-@pytest.mark.parametrize("usage_object", [
-    {
-        "server_tool_use": {
-            "web_search_requests": None
-        }
-    },
-    {
-        "server_tool_use": None
-    }
-])
+
+@pytest.mark.parametrize(
+    "usage_object",
+    [{"server_tool_use": {"web_search_requests": None}}, {"server_tool_use": None}],
+)
 def test_calculate_usage_server_tool_null(usage_object):
     """
     Correctly deal with null values in usage object
 
     Fixes https://github.com/BerriAI/litellm/issues/11920
     """
     config = AnthropicConfig()
-
+
     usage = config.calculate_usage(usage_object=usage_object, reasoning_content=None)
     assert not hasattr(usage, "server_tool_use")
 
+
 def test_extract_response_content_with_citations():
     config = AnthropicConfig()
 
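The null-handling cases assert that when "server_tool_use" is None, or its nested "web_search_requests" value is None, no server_tool_use attribute ends up on the returned usage object. A small sketch of that kind of guard follows; the helper name is hypothetical and this is not the library's real code path.

    # Hypothetical guard illustrating the behavior test_calculate_usage_server_tool_null checks.
    from typing import Optional


    def extract_web_search_requests(usage_object: dict) -> Optional[int]:
        """Return a web-search count only when the nested value is actually present."""
        server_tool_use = usage_object.get("server_tool_use")
        if not server_tool_use:
            return None  # "server_tool_use": None -> nothing to attach
        requests = server_tool_use.get("web_search_requests")
        if requests is None:
            return None  # {"web_search_requests": None} -> nothing to attach
        return requests


    assert extract_web_search_requests({"server_tool_use": None}) is None
    assert extract_web_search_requests({"server_tool_use": {"web_search_requests": None}}) is None
    assert extract_web_search_requests({"server_tool_use": {"web_search_requests": 10}}) == 10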