
Commit fa175e8

Fix gemini cli error (#14417)

* Fix gemini cli error
* Added better handling

Co-authored-by: Ishaan Jaff <[email protected]>

1 parent 1a123b2 commit fa175e8
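
For context, the "gemini cli error" here is a 400 Bad Request from the Gemini countTokens endpoint: gemini-cli includes an 'id' field in its functionResponse parts, which the API does not accept. A compact illustration of the offending payload, adapted from the test fixture added in this commit (the request shape matches what the handler builds; nothing here is new API surface):

    # Shape of a countTokens request body that previously failed with 400 Bad Request.
    # Adapted from the test fixture added below; the "id" value comes from that fixture.
    contents = [
        {
            "role": "user",
            "parts": [
                {
                    "functionResponse": {
                        "id": "read_many_files-1757526647518-730a691aac11c",  # rejected by the Gemini API
                        "name": "read_many_files",
                        "response": {"output": "No files matching the criteria were found or all were skipped."},
                    }
                }
            ],
        }
    ]

    request_body = {"contents": contents}  # the old code sent this as-is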

File tree

litellm/llms/gemini/count_tokens/handler.py
tests/test_litellm/llms/gemini/test_gemini_common_utils.py

2 files changed: +127 −23 lines

litellm/llms/gemini/count_tokens/handler.py

Lines changed: 48 additions & 22 deletions
@@ -11,7 +11,39 @@
 else:
     GenerateContentContentListUnionDict = Any
 
+
 class GoogleAIStudioTokenCounter:
+    def _clean_contents_for_gemini_api(self, contents: Any) -> Any:
+        """
+        Clean up contents to remove unsupported fields for the Gemini API.
+
+        The Google Gemini API doesn't recognize the 'id' field in function responses,
+        so we need to remove it to prevent 400 Bad Request errors.
+
+        Args:
+            contents: The contents to clean up
+
+        Returns:
+            Cleaned contents with unsupported fields removed
+        """
+        import copy
+
+        from google.genai.types import FunctionResponse
+
+        cleaned_contents = copy.deepcopy(contents)
+
+        for content in cleaned_contents:
+            parts = content["parts"]
+            for part in parts:
+                if "functionResponse" in part:
+                    function_response_data = part["functionResponse"]
+                    function_response_part = FunctionResponse(**function_response_data)
+                    function_response_part.id = None
+                    part["functionResponse"] = function_response_part.model_dump(
+                        exclude_none=True
+                    )
+
+        return cleaned_contents
 
     def _construct_url(self, model: str, api_base: Optional[str] = None) -> str:
         """
@@ -20,7 +52,6 @@ def _construct_url(self, model: str, api_base: Optional[str] = None) -> str:
         base_url = api_base or "https://generativelanguage.googleapis.com"
         return f"{base_url}/v1beta/models/{model}:countTokens"
 
-
     async def validate_environment(
         self,
         api_base: Optional[str] = None,
@@ -33,7 +64,8 @@ async def validate_environment(
         Returns a Tuple of headers and url for the Google Gen AI Studio countTokens endpoint.
         """
         from litellm.llms.gemini.google_genai.transformation import GoogleGenAIConfig
-        headers = GoogleGenAIConfig().validate_environment(
+
+        headers = GoogleGenAIConfig().validate_environment(
             api_key=api_key,
             headers=headers,
             model=model,
@@ -54,7 +86,7 @@ async def acount_tokens(
     ) -> Dict[str, Any]:
         """
         Count tokens using Google Gen AI Studio countTokens endpoint.
-
+
         Args:
             contents: The content to count tokens for (Google Gen AI format)
                 Example: [{"parts": [{"text": "Hello world"}]}]
@@ -63,7 +95,7 @@ async def acount_tokens(
             api_base: Optional API base URL (defaults to Google Gen AI Studio)
             timeout: Optional timeout for the request
             **kwargs: Additional parameters
-
+
         Returns:
             Dict containing token count information from Google Gen AI Studio API.
             Example response:
@@ -77,14 +109,13 @@ async def acount_tokens(
                     }
                 ]
             }
-
+
         Raises:
             ValueError: If API key is missing
            litellm.APIError: If the API call fails
            litellm.APIConnectionError: If the connection fails
            Exception: For any other unexpected errors
         """
-        # Set up API base URL
 
         # Prepare headers
         headers, url = await self.validate_environment(
@@ -94,44 +125,39 @@
             model=model,
             litellm_params=kwargs,
         )
-
-        # Prepare request body
-        request_body = {
-            "contents": contents
-        }
-
+
+        # Prepare request body - clean up contents to remove unsupported fields
+        cleaned_contents = self._clean_contents_for_gemini_api(contents)
+        request_body = {"contents": cleaned_contents}
+
         async_httpx_client = get_async_httpx_client(
             llm_provider=LlmProviders.GEMINI,
         )
 
         try:
             response = await async_httpx_client.post(
-                url=url,
-                headers=headers,
-                json=request_body
+                url=url, headers=headers, json=request_body
             )
-
+
             # Check for HTTP errors
             response.raise_for_status()
-
+
             # Parse response
             result = response.json()
             return result
-
+
         except httpx.HTTPStatusError as e:
             error_msg = f"Google Gen AI Studio API error: {e.response.status_code} - {e.response.text}"
             raise litellm.APIError(
                 message=error_msg,
                 llm_provider="gemini",
                 model=model,
-                status_code=e.response.status_code
+                status_code=e.response.status_code,
             ) from e
         except httpx.RequestError as e:
             error_msg = f"Request to Google Gen AI Studio failed: {str(e)}"
             raise litellm.APIConnectionError(
-                message=error_msg,
-                llm_provider="gemini",
-                model=model
+                message=error_msg, llm_provider="gemini", model=model
            ) from e
         except Exception as e:
             error_msg = f"Unexpected error during token counting: {str(e)}"
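
Taken together, every countTokens request now passes its contents through _clean_contents_for_gemini_api before being posted. Below is a minimal sketch of the helper in isolation, assuming litellm (with this commit) and the google-genai package are installed; the id value is a placeholder, not taken from the commit:

    # Minimal sketch (not part of the diff): exercising the new helper directly.
    from litellm.llms.gemini.count_tokens.handler import GoogleAIStudioTokenCounter

    counter = GoogleAIStudioTokenCounter()

    contents = [
        {
            "role": "user",
            "parts": [
                {
                    "functionResponse": {
                        "id": "tool-call-1",  # placeholder id; the live API would reject it
                        "name": "read_many_files",
                        "response": {"output": "ok"},
                    }
                }
            ],
        }
    ]

    cleaned = counter._clean_contents_for_gemini_api(contents)

    # The unsupported field is stripped; the rest of the part is preserved.
    assert "id" not in cleaned[0]["parts"][0]["functionResponse"]
    assert cleaned[0]["parts"][0]["functionResponse"]["name"] == "read_many_files"

    # The caller's original contents are untouched, since the helper deep-copies first.
    assert contents[0]["parts"][0]["functionResponse"]["id"] == "tool-call-1"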

tests/test_litellm/llms/gemini/test_gemini_common_utils.py

Lines changed: 79 additions & 1 deletion
@@ -157,4 +157,82 @@ async def test_count_tokens(self):
         mock_acount_tokens.assert_called_once_with(
             model=model_to_use,
             contents=contents
-        )
+        )
+
+    def test_clean_contents_for_gemini_api_removes_id_field(self):
+        """Test that _clean_contents_for_gemini_api removes unsupported 'id' field from function responses"""
+        from litellm.llms.gemini.count_tokens.handler import GoogleAIStudioTokenCounter
+
+        token_counter = GoogleAIStudioTokenCounter()
+
+        # Test contents with function response containing 'id' field (camelCase)
+        contents_with_id = [
+            {
+                "parts": [
+                    {
+                        "text": "Hello world"
+                    }
+                ],
+                "role": "user"
+            },
+            {
+                "parts": [
+                    {
+                        "functionResponse": {
+                            "id": "read_many_files-1757526647518-730a691aac11c",  # This should be removed
+                            "name": "read_many_files",
+                            "response": {
+                                "output": "No files matching the criteria were found or all were skipped."
+                            }
+                        }
+                    }
+                ],
+                "role": "user"
+            }
+        ]
+
+        # Clean the contents
+        cleaned_contents = token_counter._clean_contents_for_gemini_api(contents_with_id)
+
+        # Verify the 'id' field was removed
+        function_response = cleaned_contents[1]["parts"][0]["functionResponse"]
+        assert "id" not in function_response
+        assert "name" in function_response
+        assert "response" in function_response
+        assert function_response["name"] == "read_many_files"
+        assert function_response["response"]["output"] == "No files matching the criteria were found or all were skipped."
+
+
+    def test_clean_contents_for_gemini_api_preserves_other_fields(self):
+        """Test that _clean_contents_for_gemini_api preserves other fields and structure"""
+        from litellm.llms.gemini.count_tokens.handler import GoogleAIStudioTokenCounter
+
+        token_counter = GoogleAIStudioTokenCounter()
+
+        # Test contents without function responses
+        contents_without_function_response = [
+            {
+                "parts": [
+                    {
+                        "text": "This is a regular message"
+                    }
+                ],
+                "role": "user"
+            },
+            {
+                "parts": [
+                    {
+                        "text": "This is a model response"
+                    }
+                ],
+                "role": "model"
+            }
+        ]
+
+        # Clean the contents
+        cleaned_contents = token_counter._clean_contents_for_gemini_api(contents_without_function_response)
+
+        # Verify the contents are unchanged
+        assert cleaned_contents == contents_without_function_response
