Skip to content

Commit aa1fe33

Browse files
Fix error when Google returns only empty text parts (#3388)
Co-authored-by: Douwe Maan <[email protected]>
1 parent 60fd423 commit aa1fe33

File tree

2 files changed

+74
-9
lines changed

2 files changed

+74
-9
lines changed

pydantic_ai_slim/pydantic_ai/models/google.py

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -677,13 +677,18 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
677677
provider_name=self.provider_name,
678678
)
679679

680-
if part.text:
681-
if part.thought:
682-
yield self._parts_manager.handle_thinking_delta(vendor_part_id='thinking', content=part.text)
683-
else:
684-
maybe_event = self._parts_manager.handle_text_delta(vendor_part_id='content', content=part.text)
685-
if maybe_event is not None: # pragma: no branch
686-
yield maybe_event
680+
if part.text is not None:
681+
if len(part.text) > 0:
682+
if part.thought:
683+
yield self._parts_manager.handle_thinking_delta(
684+
vendor_part_id='thinking', content=part.text
685+
)
686+
else:
687+
maybe_event = self._parts_manager.handle_text_delta(
688+
vendor_part_id='content', content=part.text
689+
)
690+
if maybe_event is not None: # pragma: no branch
691+
yield maybe_event
687692
elif part.function_call:
688693
maybe_event = self._parts_manager.handle_tool_call_delta(
689694
vendor_part_id=uuid4(),
@@ -822,7 +827,10 @@ def _process_response_from_parts(
822827
elif part.code_execution_result is not None:
823828
assert code_execution_tool_call_id is not None
824829
item = _map_code_execution_result(part.code_execution_result, provider_name, code_execution_tool_call_id)
825-
elif part.text:
830+
elif part.text is not None:
831+
# Google sometimes sends empty text parts; we don't want to add them to the response
832+
if len(part.text) == 0:
833+
continue
826834
if part.thought:
827835
item = ThinkingPart(content=part.text)
828836
else:

tests/models/test_google.py

Lines changed: 58 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import datetime
44
import os
55
import re
6+
from collections.abc import AsyncIterator
67
from typing import Any
78

89
import pytest
@@ -47,6 +48,7 @@
4748
BuiltinToolCallEvent, # pyright: ignore[reportDeprecated]
4849
BuiltinToolResultEvent, # pyright: ignore[reportDeprecated]
4950
)
51+
from pydantic_ai.models import ModelRequestParameters
5052
from pydantic_ai.output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
5153
from pydantic_ai.settings import ModelSettings
5254
from pydantic_ai.usage import RequestUsage, RunUsage, UsageLimits
@@ -56,6 +58,7 @@
5658

5759
with try_import() as imports_successful:
5860
from google.genai.types import (
61+
FinishReason as GoogleFinishReason,
5962
GenerateContentResponse,
6063
GenerateContentResponseUsageMetadata,
6164
HarmBlockThreshold,
@@ -64,7 +67,12 @@
6467
ModalityTokenCount,
6568
)
6669

67-
from pydantic_ai.models.google import GoogleModel, GoogleModelSettings, _metadata_as_usage # type: ignore
70+
from pydantic_ai.models.google import (
71+
GeminiStreamedResponse,
72+
GoogleModel,
73+
GoogleModelSettings,
74+
_metadata_as_usage, # pyright: ignore[reportPrivateUsage]
75+
)
6876
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings
6977
from pydantic_ai.providers.google import GoogleProvider
7078
from pydantic_ai.providers.openai import OpenAIProvider
@@ -3063,3 +3071,52 @@ async def test_google_httpx_client_is_not_closed(allow_model_requests: None, gem
30633071
agent = Agent(GoogleModel('gemini-2.5-flash-lite', provider=GoogleProvider(api_key=gemini_api_key)))
30643072
result = await agent.run('What is the capital of Mexico?')
30653073
assert result.output == snapshot('The capital of Mexico is **Mexico City**.')
3074+
3075+
3076+
def test_google_process_response_filters_empty_text_parts(google_provider: GoogleProvider):
3077+
model = GoogleModel('gemini-2.5-pro', provider=google_provider)
3078+
response = _generate_response_with_texts(response_id='resp-123', texts=['', 'first', '', 'second'])
3079+
3080+
result = model._process_response(response) # pyright: ignore[reportPrivateUsage]
3081+
3082+
assert result.parts == snapshot([TextPart(content='first'), TextPart(content='second')])
3083+
3084+
3085+
async def test_gemini_streamed_response_emits_text_events_for_non_empty_parts():
3086+
chunk = _generate_response_with_texts('stream-1', ['', 'streamed text'])
3087+
3088+
async def response_iterator() -> AsyncIterator[GenerateContentResponse]:
3089+
yield chunk
3090+
3091+
streamed_response = GeminiStreamedResponse(
3092+
model_request_parameters=ModelRequestParameters(),
3093+
_model_name='gemini-test',
3094+
_response=response_iterator(),
3095+
_timestamp=datetime.datetime.now(datetime.timezone.utc),
3096+
_provider_name='test-provider',
3097+
)
3098+
3099+
events = [event async for event in streamed_response._get_event_iterator()] # pyright: ignore[reportPrivateUsage]
3100+
assert events == snapshot([PartStartEvent(index=0, part=TextPart(content='streamed text'))])
3101+
3102+
3103+
def _generate_response_with_texts(response_id: str, texts: list[str]) -> GenerateContentResponse:
3104+
return GenerateContentResponse.model_validate(
3105+
{
3106+
'response_id': response_id,
3107+
'model_version': 'gemini-test',
3108+
'usage_metadata': GenerateContentResponseUsageMetadata(
3109+
prompt_token_count=0,
3110+
candidates_token_count=0,
3111+
),
3112+
'candidates': [
3113+
{
3114+
'finish_reason': GoogleFinishReason.STOP,
3115+
'content': {
3116+
'role': 'model',
3117+
'parts': [{'text': text} for text in texts],
3118+
},
3119+
}
3120+
],
3121+
}
3122+
)

0 commit comments

Comments (0)