
Commit 3f3efea

test(test_gemini.py): add additional testing for additionalproperties case
1 parent 258b674 commit 3f3efea

File tree: 3 files changed (+96, -34 lines changed)

3 files changed

+96
-34
lines changed

litellm/types/llms/vertex_ai.py

Lines changed: 0 additions & 1 deletion
@@ -113,7 +113,6 @@ class Schema(TypedDict, total=False):
     pattern: str
     example: Any
     anyOf: List["Schema"]
-    additionalProperties: Any


 class FunctionDeclaration(TypedDict, total=False):
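
Side note on this change: with additionalProperties removed from the Schema TypedDict, any JSON schema arriving from an OpenAI-style tool definition has to be filtered down to keys Gemini accepts before a Schema is built. Below is a minimal sketch of that idea; the helper name filter_schema_keys and the key set are illustrative assumptions, not litellm's actual transformation code.

# Hypothetical sketch (not litellm's actual transformation): drop schema keys
# that the trimmed Vertex AI Schema type no longer carries, e.g. additionalProperties.
from typing import Any, Dict

# Illustrative subset of keys the Schema TypedDict still declares.
SUPPORTED_SCHEMA_KEYS = {
    "type", "format", "description", "enum", "items",
    "properties", "required", "nullable", "pattern", "example", "anyOf",
}


def filter_schema_keys(schema: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively remove unsupported keys from an OpenAI-style JSON schema."""
    cleaned: Dict[str, Any] = {}
    for key, value in schema.items():
        if key not in SUPPORTED_SCHEMA_KEYS:
            continue  # e.g. skips "additionalProperties": True
        if key == "properties" and isinstance(value, dict):
            cleaned[key] = {name: filter_schema_keys(sub) for name, sub in value.items()}
        elif key == "items" and isinstance(value, dict):
            cleaned[key] = filter_schema_keys(value)
        else:
            cleaned[key] = value
    return cleaned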

tests/llm_translation/test_gemini.py

Lines changed: 59 additions & 20 deletions
@@ -269,7 +269,11 @@ def test_gemini_image_generation():
     assert len(response.choices[0].message.images) > 0
     assert response.choices[0].message.images[0]["image_url"] is not None
     assert response.choices[0].message.images[0]["image_url"]["url"] is not None
-    assert response.choices[0].message.images[0]["image_url"]["url"].startswith("data:image/png;base64,")
+    assert (
+        response.choices[0]
+        .message.images[0]["image_url"]["url"]
+        .startswith("data:image/png;base64,")
+    )


 def test_gemini_thinking():
@@ -661,7 +665,8 @@ def test_system_message_with_no_user_message():
     assert response is not None

     assert response.choices[0].message.content is not None
-
+
+
 def get_current_weather(location, unit="fahrenheit"):
     """Get the current weather in a given location"""
     if "tokyo" in location.lower():
@@ -778,9 +783,9 @@ def test_gemini_reasoning_effort_minimal():

     # Test with different Gemini models to verify model-specific mapping
     test_cases = [
-        ("gemini/gemini-2.5-flash", 1), # Flash: minimum 1 token
-        ("gemini/gemini-2.5-pro", 128), # Pro: minimum 128 tokens
-        ("gemini/gemini-2.5-flash-lite", 512), # Flash-Lite: minimum 512 tokens
+        ("gemini/gemini-2.5-flash", 1),  # Flash: minimum 1 token
+        ("gemini/gemini-2.5-pro", 128),  # Pro: minimum 128 tokens
+        ("gemini/gemini-2.5-flash-lite", 512),  # Flash-Lite: minimum 512 tokens
     ]

     for model, expected_min_budget in test_cases:
@@ -793,24 +798,32 @@ def test_gemini_reasoning_effort_minimal():
                 "reasoning_effort": "minimal",
             },
         )
-
+
         # Verify that the thinking config is set correctly
         request_body = raw_request["raw_request_body"]
-        assert "generationConfig" in request_body, f"Model {model} should have generationConfig"
-
+        assert (
+            "generationConfig" in request_body
+        ), f"Model {model} should have generationConfig"
+
         generation_config = request_body["generationConfig"]
-        assert "thinkingConfig" in generation_config, f"Model {model} should have thinkingConfig"
-
+        assert (
+            "thinkingConfig" in generation_config
+        ), f"Model {model} should have thinkingConfig"
+
         thinking_config = generation_config["thinkingConfig"]
-        assert "thinkingBudget" in thinking_config, f"Model {model} should have thinkingBudget"
-
+        assert (
+            "thinkingBudget" in thinking_config
+        ), f"Model {model} should have thinkingBudget"
+
         actual_budget = thinking_config["thinkingBudget"]
-        assert actual_budget == expected_min_budget, \
-            f"Model {model} should map 'minimal' to {expected_min_budget} tokens, got {actual_budget}"
-
+        assert (
+            actual_budget == expected_min_budget
+        ), f"Model {model} should map 'minimal' to {expected_min_budget} tokens, got {actual_budget}"
+
         # Verify that includeThoughts is True for minimal reasoning effort
-        assert thinking_config.get("includeThoughts", True), \
-            f"Model {model} should have includeThoughts=True for minimal reasoning effort"
+        assert thinking_config.get(
+            "includeThoughts", True
+        ), f"Model {model} should have includeThoughts=True for minimal reasoning effort"

     # Test with unknown model (should use generic fallback)
     try:
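
For readers skimming the assertions above: the test encodes the expectation that reasoning_effort="minimal" maps to the smallest thinkingBudget each model family accepts (Flash: 1, Pro: 128, Flash-Lite: 512), with 128 as the generic fallback for unknown models. A rough standalone sketch of that mapping follows, using a hypothetical helper name rather than litellm's real implementation.

# Hypothetical sketch of the mapping the assertions check; not litellm's code.
MIN_THINKING_BUDGET = {
    "gemini-2.5-flash-lite": 512,  # checked before "flash", which is a substring
    "gemini-2.5-pro": 128,
    "gemini-2.5-flash": 1,
}


def minimal_thinking_budget(model: str) -> int:
    """Return the model-specific minimum thinkingBudget for reasoning_effort='minimal'."""
    for name, budget in MIN_THINKING_BUDGET.items():
        if name in model:
            return budget
    return 128  # generic fallback for unknown models


assert minimal_thinking_budget("gemini/gemini-2.5-flash-lite") == 512
assert minimal_thinking_budget("gemini/gemini-2.5-flash") == 1
assert minimal_thinking_budget("gemini/some-future-model") == 128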
@@ -822,15 +835,41 @@ def test_gemini_reasoning_effort_minimal():
                 "reasoning_effort": "minimal",
             },
         )
-
+
         request_body = raw_request["raw_request_body"]
         generation_config = request_body["generationConfig"]
         thinking_config = generation_config["thinkingConfig"]
         # Should use generic fallback (128 tokens)
-        assert thinking_config["thinkingBudget"] == 128, \
-            "Unknown model should use generic fallback of 128 tokens"
+        assert (
+            thinking_config["thinkingBudget"] == 128
+        ), "Unknown model should use generic fallback of 128 tokens"
     except Exception as e:
         # If return_raw_request doesn't work for unknown models, that's okay
         # The important part is that our known models work correctly
         print(f"Note: Unknown model test skipped due to: {e}")
         pass
+
+
+def test_gemini_additional_properties_bug():
+    # Simple tool with additionalProperties (simulating the TypedDict issue)
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "test_tool",
+                "description": "Test tool",
+                "parameters": {
+                    "type": "object",
+                    "properties": {"param1": {"type": "string"}},
+                    # This causes the error - any non-False value
+                    "additionalProperties": True,  # Could also be None, {}, etc.
+                },
+            },
+        }
+    ]
+
+    messages = [{"role": "user", "content": "Test message"}]
+
+    response = litellm.completion(
+        model="gemini/gemini-2.5-flash", messages=messages, tools=tools
+    )
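
The new test only exercises a live completion call; to make the regression explicit, one could also assert on the outgoing payload. The snippet below is a loose sketch of the desired shape only (field names assumed from Gemini's functionDeclarations format, not captured from litellm's real request): the tool schema that reaches Gemini should no longer carry additionalProperties.

# Assumed target shape, for illustration only - not captured from a real request.
expected_function_declaration = {
    "name": "test_tool",
    "description": "Test tool",
    "parameters": {
        "type": "object",
        "properties": {"param1": {"type": "string"}},
        # note: no "additionalProperties" key - it should be dropped before the call
    },
}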

tests/local_testing/test_amazing_vertex_completion.py

Lines changed: 37 additions & 13 deletions
@@ -397,7 +397,7 @@ async def test_async_vertexai_response():
         | litellm.vertex_text_models
         | litellm.vertex_code_text_models
     )
-
+
     test_models = random.sample(list(test_models), 1)
     test_models += list(litellm.vertex_language_models) # always test gemini-pro
     for model in test_models:
@@ -504,7 +504,6 @@ async def test_async_vertexai_streaming_response():
         pytest.fail(f"An exception occurred: {e}")


-
 @pytest.mark.parametrize("load_pdf", [False]) # True,
 @pytest.mark.flaky(retries=3, delay=1)
 def test_completion_function_plus_pdf(load_pdf):
@@ -547,6 +546,7 @@ def test_completion_function_plus_pdf(load_pdf):
     except Exception as e:
         pytest.fail("Got={}".format(str(e)))

+
 def encode_image(image_path):
     import base64

@@ -910,7 +910,10 @@ async def test_partner_models_httpx(model, region, sync_mode):
     [
         ("vertex_ai/meta/llama-4-scout-17b-16e-instruct-maas", "us-east5"),
         ("vertex_ai/qwen/qwen3-coder-480b-a35b-instruct-maas", "us-south1"),
-        ("vertex_ai/mistral-large-2411", "us-central1"), # critical - we had this issue: https://github.com/BerriAI/litellm/issues/13888
+        (
+            "vertex_ai/mistral-large-2411",
+            "us-central1",
+        ),  # critical - we had this issue: https://github.com/BerriAI/litellm/issues/13888
         ("vertex_ai/openai/gpt-oss-20b-maas", "us-central1"),
     ],
 )
@@ -3827,7 +3830,7 @@ def test_vertex_ai_gemini_audio_ogg():
 @pytest.mark.asyncio
 async def test_vertex_ai_deepseek():
     """Test that deepseek models use the correct v1 API endpoint instead of v1beta1."""
-    #load_vertex_ai_credentials()
+    # load_vertex_ai_credentials()
     litellm._turn_on_debug()
     from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler

@@ -3840,21 +3843,17 @@ async def test_vertex_ai_deepseek():
             {
                 "message": {
                     "role": "assistant",
-                    "content": "Hello! How can I help you today?"
+                    "content": "Hello! How can I help you today?",
                 },
                 "index": 0,
-                "finish_reason": "stop"
+                "finish_reason": "stop",
             }
         ],
-        "usage": {
-            "prompt_tokens": 10,
-            "completion_tokens": 20,
-            "total_tokens": 30
-        },
-        "model": "deepseek-ai/deepseek-r1-0528-maas"
+        "usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
+        "model": "deepseek-ai/deepseek-r1-0528-maas",
     }
     mock_response.status_code = 200
-
+
     with patch.object(client, "post", return_value=mock_response) as mock_post:
         response = await acompletion(
             model="vertex_ai/deepseek-ai/deepseek-r1-0528-maas",
@@ -3900,3 +3899,28 @@ def test_gemini_grounding_on_streaming():
             vertex_ai_grounding_metadata_shows_up = True
         print(chunk)
     assert vertex_ai_grounding_metadata_shows_up
+
+
+def test_gemini_additional_properties_bug():
+    # Simple tool with additionalProperties (simulating the TypedDict issue)
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "test_tool",
+                "description": "Test tool",
+                "parameters": {
+                    "type": "object",
+                    "properties": {"param1": {"type": "string"}},
+                    # This causes the error - any non-False value
+                    "additionalProperties": True,  # Could also be None, {}, etc.
+                },
+            },
+        }
+    ]
+
+    messages = [{"role": "user", "content": "Test message"}]
+
+    response = litellm.completion(
+        model="gemini/gemini-2.5-flash", messages=messages, tools=tools
+    )
