5 files changed (+8 −7 lines)

@@ -45,7 +45,9 @@ class LlmRequest(BaseModel):
   contents: list[types.Content] = Field(default_factory=list)
   """The contents to send to the model."""

-  config: Optional[types.GenerateContentConfig] = None
+  config: types.GenerateContentConfig = Field(
+      default_factory=types.GenerateContentConfig
+  )
   live_connect_config: types.LiveConnectConfig = types.LiveConnectConfig()
   """Additional config for the generate content request.
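With default_factory, a newly constructed LlmRequest always carries its own GenerateContentConfig instead of None, so downstream code no longer has to guard against a missing config. A minimal sketch of the resulting behavior, with the class body abbreviated to the fields touched by this diff:

from google.genai import types
from pydantic import BaseModel, Field


class LlmRequest(BaseModel):
  """Abbreviated sketch of the model after this change."""

  contents: list[types.Content] = Field(default_factory=list)
  config: types.GenerateContentConfig = Field(
      default_factory=types.GenerateContentConfig
  )


# Every instance gets a fresh, usable config; no None checks required.
req = LlmRequest()
assert req.config == types.GenerateContentConfig()
# default_factory also avoids sharing one mutable default across instances.
assert req.config is not LlmRequest().config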

@@ -1505,7 +1505,6 @@ async def test_computer_use_with_no_config():
       contents=[
           types.Content(role="user", parts=[types.Part.from_text(text="Hello")])
       ],
-      config=None,
   )

   # Should not raise an exception

@@ -62,7 +62,7 @@ async def test_process_llm_request_no_declaration():
       tool_context=tool_context, llm_request=llm_request
   )

-  assert llm_request.config is None
+  assert llm_request.config == types.GenerateContentConfig()


 @pytest.mark.asyncio
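Because config now defaults to an empty GenerateContentConfig, request processors such as the built-in tools can extend it directly instead of first replacing a None value. A hypothetical sketch of that pattern (the function name add_search_tool is illustrative, not part of the diff):

from google.genai import types


def add_search_tool(llm_request):
  """Hypothetical processor step; llm_request is an LlmRequest instance."""
  # With the new default, llm_request.config always exists; only the nested
  # tools list may still be None and needs initializing before appending.
  if llm_request.config.tools is None:
    llm_request.config.tools = []
  llm_request.config.tools.append(
      types.Tool(google_search=types.GoogleSearch())
  )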

@@ -322,12 +322,12 @@ async def test_process_llm_request_with_empty_model_raises_error(self):
   )

   @pytest.mark.asyncio
-  async def test_process_llm_request_with_none_config(self):
+  async def test_process_llm_request_with_no_config(self):
     """Test processing LLM request with None config."""
     tool = GoogleSearchTool()
     tool_context = await _create_tool_context()

-    llm_request = LlmRequest(model='gemini-2.0-flash', config=None)
+    llm_request = LlmRequest(model='gemini-2.0-flash')

     await tool.process_llm_request(
         tool_context=tool_context, llm_request=llm_request

@@ -242,12 +242,12 @@ async def test_process_llm_request_with_empty_model_raises_error(self):
   )

   @pytest.mark.asyncio
-  async def test_process_llm_request_with_none_config(self):
+  async def test_process_llm_request_with_no_config(self):
     """Test processing LLM request with None config."""
     tool = UrlContextTool()
     tool_context = await _create_tool_context()

-    llm_request = LlmRequest(model='gemini-2.0-flash', config=None)
+    llm_request = LlmRequest(model='gemini-2.0-flash')

     await tool.process_llm_request(
         tool_context=tool_context, llm_request=llm_request