Skip to content

Commit 420166d

Browse files
authored
fix: allow empty content for TextPart on GoogleModel (#2203)
1 parent 479b346 commit 420166d

File tree

3 files changed

+76
-2
lines changed

3 files changed

+76
-2
lines changed

pydantic_ai_slim/pydantic_ai/models/google.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -484,8 +484,7 @@ def _content_model_response(m: ModelResponse) -> ContentDict:
484484
function_call = FunctionCallDict(name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id)
485485
parts.append({'function_call': function_call})
486486
elif isinstance(item, TextPart):
487-
if item.content: # pragma: no branch
488-
parts.append({'text': item.content})
487+
parts.append({'text': item.content})
489488
elif isinstance(item, ThinkingPart): # pragma: no cover
490489
# NOTE: We don't send ThinkingPart to the providers yet. If you are unsatisfied with this,
491490
# please open an issue. The below code is the code to send thinking to the provider.
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
interactions:
2+
- request:
3+
headers:
4+
content-type:
5+
- application/json
6+
method: post
7+
parsed_body:
8+
contents:
9+
- parts:
10+
- text: Hi
11+
role: user
12+
- parts:
13+
- text: ''
14+
role: model
15+
- parts:
16+
- text: Empty?
17+
role: user
18+
generationConfig: {}
19+
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent
20+
response:
21+
headers:
22+
alt-svc:
23+
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
24+
content-length:
25+
- '724'
26+
content-type:
27+
- application/json; charset=UTF-8
28+
server-timing:
29+
- gfet4t7; dur=387
30+
transfer-encoding:
31+
- chunked
32+
vary:
33+
- Origin
34+
- X-Origin
35+
- Referer
36+
parsed_body:
37+
candidates:
38+
- avgLogprobs: -0.02701732359434429
39+
content:
40+
parts:
41+
- text: |
42+
Yes, your previous message was empty. Is there anything I can help you with?
43+
role: model
44+
finishReason: STOP
45+
modelVersion: gemini-1.5-flash
46+
responseId: NHt4aPycNfzcnvgP1q6EgQw
47+
usageMetadata:
48+
candidatesTokenCount: 19
49+
candidatesTokensDetails:
50+
- modality: TEXT
51+
tokenCount: 19
52+
promptTokenCount: 3
53+
promptTokensDetails:
54+
- modality: TEXT
55+
tokenCount: 3
56+
totalTokenCount: 22
57+
status:
58+
code: 200
59+
message: OK
60+
version: 1

tests/models/test_google.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -604,6 +604,21 @@ async def test_google_model_empty_user_prompt(allow_model_requests: None, google
604604
assert result.output == snapshot("I'm ready to assist you. Please tell me what you need.\n")
605605

606606

607+
async def test_google_model_empty_assistant_response(allow_model_requests: None, google_provider: GoogleProvider):
608+
m = GoogleModel('gemini-1.5-flash', provider=google_provider)
609+
agent = Agent(m)
610+
611+
result = await agent.run(
612+
'Empty?',
613+
message_history=[
614+
ModelRequest(parts=[UserPromptPart(content='Hi')]),
615+
ModelResponse(parts=[TextPart(content='')]),
616+
],
617+
)
618+
619+
assert result.output == snapshot('Yes, your previous message was empty. Is there anything I can help you with?\n')
620+
621+
607622
async def test_google_model_thinking_part(allow_model_requests: None, google_provider: GoogleProvider):
608623
m = GoogleModel('gemini-2.5-pro-preview-03-25', provider=google_provider)
609624
settings = GoogleModelSettings(google_thinking_config={'include_thoughts': True})

0 commit comments

Comments (0)