@@ -574,6 +574,42 @@ async def test_stream_native_output(allow_model_requests: None):
         assert result.is_complete
 
 
+async def test_stream_tool_call_with_empty_text(allow_model_requests: None):
+    stream = [
+        chunk(
+            [
+                ChoiceDelta(
+                    content='',  # Ollama will include an empty text delta even when it's going to call a tool
+                    tool_calls=[
+                        ChoiceDeltaToolCall(
+                            index=0, function=ChoiceDeltaToolCallFunction(name='final_result', arguments=None)
+                        )
+                    ],
+                ),
+            ]
+        ),
+        struc_chunk(None, '{"first": "One'),
+        struc_chunk(None, '", "second": "Two"'),
+        struc_chunk(None, '}'),
+        chunk([]),
+    ]
+    mock_client = MockOpenAI.create_mock_stream(stream)
+    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
+    agent = Agent(m, output_type=[str, MyTypedDict])
+
+    async with agent.run_stream('') as result:
+        assert not result.is_complete
+        assert [c async for c in result.stream(debounce_by=None)] == snapshot(
+            [
+                {'first': 'One'},
+                {'first': 'One', 'second': 'Two'},
+                {'first': 'One', 'second': 'Two'},
+                {'first': 'One', 'second': 'Two'},
+            ]
+        )
+        assert await result.get_output() == snapshot({'first': 'One', 'second': 'Two'})
+
+
 async def test_no_content(allow_model_requests: None):
     stream = [chunk([ChoiceDelta()]), chunk([ChoiceDelta()])]
     mock_client = MockOpenAI.create_mock_stream(stream)
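
For context, `chunk` and `struc_chunk` are test helpers defined earlier in this module. A minimal sketch of what they might look like, assuming they wrap the openai SDK's streaming chunk types; the field values (id, created, model) are illustrative only and the real definitions may differ:

from openai.types import chat
from openai.types.chat.chat_completion_chunk import (
    Choice as ChunkChoice,
    ChoiceDelta,
    ChoiceDeltaToolCall,
    ChoiceDeltaToolCallFunction,
)


def chunk(deltas: list[ChoiceDelta]) -> chat.ChatCompletionChunk:
    # Build a streamed chat-completion chunk with one choice per delta.
    return chat.ChatCompletionChunk(
        id='x',
        choices=[ChunkChoice(index=i, delta=delta) for i, delta in enumerate(deltas)],
        created=1704067200,
        model='gpt-4o',
        object='chat.completion.chunk',
    )


def struc_chunk(tool_name: str | None, tool_arguments: str | None) -> chat.ChatCompletionChunk:
    # A chunk carrying a single tool-call delta (tool name and/or partial JSON arguments),
    # used above to stream the structured output piece by piece.
    return chunk(
        [
            ChoiceDelta(
                tool_calls=[
                    ChoiceDeltaToolCall(
                        index=0,
                        function=ChoiceDeltaToolCallFunction(name=tool_name, arguments=tool_arguments),
                    )
                ]
            )
        ]
    )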