Skip to content

Commit fde5eea

Browse files
authored
Drop assertion on Google streaming (#2618)
1 parent c959ee1 commit fde5eea

File tree

3 files changed: 7 additions, 6 deletions

pydantic_ai_slim/pydantic_ai/models/google.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ async def count_tokens(
219219
)
220220
if self._provider.name != 'google-gla':
221221
# The fields are not supported by the Gemini API per https://github.com/googleapis/python-genai/blob/7e4ec284dc6e521949626f3ed54028163ef9121d/google/genai/models.py#L1195-L1214
222-
config.update(
222+
config.update( # pragma: lax no cover
223223
system_instruction=generation_config.get('system_instruction'),
224224
tools=cast(list[ToolDict], generation_config.get('tools')),
225225
# Annoyingly, GenerationConfigDict has fewer fields than GenerateContentConfigDict, and no extra fields are allowed.
@@ -535,9 +535,8 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
535535
raise UnexpectedModelBehavior('Safety settings triggered', str(chunk))
536536
else: # pragma: no cover
537537
raise UnexpectedModelBehavior('Content field missing from streaming Gemini response', str(chunk))
538-
539-
assert candidate.content.parts is not None
540-
for part in candidate.content.parts:
538+
parts = candidate.content.parts or []
539+
for part in parts:
541540
if part.text is not None:
542541
if part.thought:
543542
yield self._parts_manager.handle_thinking_delta(vendor_part_id='thinking', content=part.text)

pydantic_ai_slim/pydantic_ai/usage.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -292,7 +292,7 @@ def check_before_request(self, usage: RunUsage) -> None:
292292

293293
total_tokens = usage.total_tokens
294294
if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
295-
raise UsageLimitExceeded(
295+
raise UsageLimitExceeded( # pragma: lax no cover
296296
f'The next request would exceed the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})'
297297
)
298298

tests/models/test_google.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1670,7 +1670,9 @@ async def test_google_model_usage_limit_not_exceeded(allow_model_requests: None,
16701670
""")
16711671

16721672

1673-
async def test_google_vertexai_model_usage_limit_exceeded(allow_model_requests: None, vertex_provider: GoogleProvider):
1673+
async def test_google_vertexai_model_usage_limit_exceeded(
1674+
allow_model_requests: None, vertex_provider: GoogleProvider
1675+
): # pragma: lax no cover
16741676
model = GoogleModel('gemini-2.0-flash', provider=vertex_provider, settings=ModelSettings(max_tokens=100))
16751677

16761678
agent = Agent(model, system_prompt='You are a chatbot.')

0 commit comments

Comments (0)