2 files changed: +8 −4

pydantic_ai_slim/pydantic_ai/models

@@ -586,7 +586,8 @@ async def _process_streamed_response(
                 'Streamed response ended without content or tool calls'
             )
 
-        # ChatCompletionChunk.model is required to be set, but Azure OpenAI omits it so we fall back to the model name set by the user.
+        # When using Azure OpenAI and a content filter is enabled, the first chunk will contain a `''` model name,
+        # so we set it from a later chunk in `OpenAIChatStreamedResponse`.
         model_name = first_chunk.model or self._model_name
 
         return OpenAIStreamedResponse(
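
The fallback in `_process_streamed_response` is just the `or` on `first_chunk.model`: an empty model name from Azure falls through to the name the user configured. A minimal standalone sketch of that behaviour (the `FakeChunk` class and the literal model names are illustrative, not part of the library):

from dataclasses import dataclass


@dataclass
class FakeChunk:
    # Stand-in for openai.types.chat.ChatCompletionChunk; only the field that matters here.
    model: str


def resolve_model_name(first_chunk: FakeChunk, configured_model_name: str) -> str:
    # Azure OpenAI with a content filter enabled sends model='' on the first chunk,
    # so the empty string falls through to the configured model name.
    return first_chunk.model or configured_model_name


assert resolve_model_name(FakeChunk(model='gpt-4o-123'), 'gpt-4o') == 'gpt-4o-123'
assert resolve_model_name(FakeChunk(model=''), 'gpt-4o') == 'gpt-4o'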
@@ -1352,9 +1353,12 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
         async for chunk in self._response:
             self._usage += _map_usage(chunk)
 
-            if chunk.id and self.provider_response_id is None:
+            if chunk.id:  # pragma: no branch
                 self.provider_response_id = chunk.id
 
+            if chunk.model:
+                self._model_name = chunk.model
+
             try:
                 choice = chunk.choices[0]
             except IndexError:
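
The second hunk is what recovers the real name later: any chunk that carries a non-empty `model` overwrites the fallback. A rough sketch of that part of the loop, using a simplified chunk type and omitting usage tracking and event handling (everything except the `provider_response_id` / `_model_name` updates is illustrative):

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FakeChunk:
    id: str
    model: str


@dataclass
class FakeStreamedResponse:
    # Simplified stand-in for the streamed-response state touched by this diff.
    _model_name: str
    provider_response_id: str | None = None

    def consume(self, chunks: list[FakeChunk]) -> None:
        for chunk in chunks:
            if chunk.id:
                self.provider_response_id = chunk.id
            # A later chunk with a non-empty model name overrides whatever was
            # resolved from the first chunk or the user's configuration.
            if chunk.model:
                self._model_name = chunk.model


response = FakeStreamedResponse(_model_name='gpt-4o')
response.consume([FakeChunk(id='abc123', model=''), FakeChunk(id='abc123', model='gpt-4o-123')])
assert response._model_name == 'gpt-4o-123'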
(second changed file)

@@ -398,7 +398,7 @@ async def test_stream_text(allow_model_requests: None):
 
 async def test_stream_text_finish_reason(allow_model_requests: None):
     first_chunk = text_chunk('hello ')
-    # Test that we fall back to the model name set by the user if the model name is not set in the first chunk, like on Azure OpenAI.
+    # Test that we get the model name from a later chunk if it is not set on the first one, like on Azure OpenAI with content filter enabled.
     first_chunk.model = ''
     stream = [
         first_chunk,
@@ -421,7 +421,7 @@ async def test_stream_text_finish_reason(allow_model_requests: None):
             ModelResponse(
                 parts=[TextPart(content='hello world.')],
                 usage=RequestUsage(input_tokens=6, output_tokens=3),
-                model_name='gpt-4o',
+                model_name='gpt-4o-123',
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_details={'finish_reason': 'stop'},
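
The expected snapshot changes accordingly: the first chunk's model name is blanked out, but the later chunks in the stream (presumably built with 'gpt-4o-123' by the existing `text_chunk` helper) now set the model name, so the response reports 'gpt-4o-123' rather than the user-configured 'gpt-4o'.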