Once the run finishes, `agent_run.result` becomes a [`AgentRunResult`][pydantic_ai.agent.AgentRunResult], containing the final output (and related metadata).
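
For example, here is a minimal sketch of driving a run to completion with `agent.iter` and then reading that result; the model name and prompt are placeholders:

```python {title="final_result_example.py"}
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')


async def main():
    async with agent.iter('What is the capital of France?') as agent_run:
        async for _node in agent_run:
            pass  # drive the graph node by node until the run completes
        # once iteration has finished, the run's result is populated
        print(agent_run.result.data)
        #> Paris


if __name__ == '__main__':
    asyncio.run(main())
```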

---

### Streaming

Here is an example of streaming an agent run in combination with `async for` iteration:

```python {title="streaming.py"}
import asyncio
from dataclasses import dataclass
from datetime import date

from pydantic_ai import Agent
from pydantic_ai.messages import (
    FinalResultEvent,
    FunctionToolCallEvent,
    FunctionToolResultEvent,
    PartDeltaEvent,
    PartStartEvent,
    TextPartDelta,
    ToolCallPartDelta,
)
from pydantic_ai.tools import RunContext


@dataclass
class WeatherService:
    async def get_forecast(self, location: str, forecast_date: date) -> str:
        # In real code: call weather API, DB queries, etc.
        return f'The forecast in {location} on {forecast_date} is 24°C and sunny.'

    async def get_historic_weather(self, location: str, forecast_date: date) -> str:
        # In real code: call a historical weather API or DB
        return (
            f'The weather in {location} on {forecast_date} was 18°C and partly cloudy.'
        )


weather_agent = Agent[WeatherService, str](
    'openai:gpt-4o',
    deps_type=WeatherService,
    result_type=str,  # We'll produce a final answer as plain text
    system_prompt='Providing a weather forecast at the locations the user provides.',
)


@weather_agent.tool
async def weather_forecast(
    ctx: RunContext[WeatherService],
    location: str,
    forecast_date: date,
) -> str:
    if forecast_date >= date.today():
        return await ctx.deps.get_forecast(location, forecast_date)
    else:
        return await ctx.deps.get_historic_weather(location, forecast_date)


output_messages: list[str] = []


async def main():
    user_prompt = 'What will the weather be like in Paris on Tuesday?'

    # Begin a node-by-node, streaming iteration
    async with weather_agent.iter(user_prompt, deps=WeatherService()) as run:
        async for node in run:
            if Agent.is_user_prompt_node(node):
                # A user prompt node => The user has provided input
                output_messages.append(f'=== UserPromptNode: {node.user_prompt} ===')
            elif Agent.is_model_request_node(node):
                # A model request node => We can stream tokens from the model's request
                output_messages.append(
                    '=== ModelRequestNode: streaming partial request tokens ==='
                )
                async with node.stream(run.ctx) as request_stream:
                    async for event in request_stream:
                        if isinstance(event, PartStartEvent):
                            output_messages.append(
                                f'[Request] Starting part {event.index}: {event.part!r}'
                            )
                        elif isinstance(event, PartDeltaEvent):
                            if isinstance(event.delta, TextPartDelta):
                                output_messages.append(
                                    f'[Request] Part {event.index} text delta: {event.delta.content_delta!r}'
                                )
                            elif isinstance(event.delta, ToolCallPartDelta):
                                output_messages.append(
                                    f'[Request] Part {event.index} args_delta={event.delta.args_delta}'
                                )
                        elif isinstance(event, FinalResultEvent):
                            output_messages.append(
                                f'[Result] The model produced a final result (tool_name={event.tool_name})'
                            )
            elif Agent.is_handle_response_node(node):
                # A handle-response node => The model returned some data, potentially calls a tool
                output_messages.append(
                    '=== HandleResponseNode: streaming partial response & tool usage ==='
                )
                async with node.stream(run.ctx) as handle_stream:
                    async for event in handle_stream:
                        if isinstance(event, FunctionToolCallEvent):
                            output_messages.append(
                                f'[Tools] The LLM calls tool={event.part.tool_name!r} with args={event.part.args} (tool_call_id={event.part.tool_call_id!r})'
                            )
                        elif isinstance(event, FunctionToolResultEvent):
                            output_messages.append(
                                f'[Tools] Tool call {event.tool_call_id!r} returned => {event.result.content}'
                            )
            elif Agent.is_end_node(node):
                assert run.result.data == node.data.data
                # Once an End node is reached, the agent run is complete
                output_messages.append(f'=== Final Agent Output: {run.result.data} ===')


if __name__ == '__main__':
    asyncio.run(main())

    print(output_messages)
    """
    [
        '=== ModelRequestNode: streaming partial request tokens ===',
        '[Request] Starting part 0: ToolCallPart(tool_name=\'weather_forecast\', args=\'{"location":"Pa\', tool_call_id=\'0001\', part_kind=\'tool-call\')',
        '[Request] Part 0 args_delta=ris","forecast_',
        '[Request] Part 0 args_delta=date":"2030-01-',
        '[Request] Part 0 args_delta=01"}',
        '=== HandleResponseNode: streaming partial response & tool usage ===',
        '[Tools] The LLM calls tool=\'weather_forecast\' with args={"location":"Paris","forecast_date":"2030-01-01"} (tool_call_id=\'0001\')',
        "[Tools] Tool call '0001' returned => The forecast in Paris on 2030-01-01 is 24°C and sunny.",
        '=== ModelRequestNode: streaming partial request tokens ===',
        "[Request] Starting part 0: TextPart(content='It will be ', part_kind='text')",
        '[Result] The model produced a final result (tool_name=None)',
        "[Request] Part 0 text delta: 'warm and sunny '",
        "[Request] Part 0 text delta: 'in Paris on '",
        "[Request] Part 0 text delta: 'Tuesday.'",
        '=== HandleResponseNode: streaming partial response & tool usage ===',
        '=== Final Agent Output: It will be warm and sunny in Paris on Tuesday. ===',
    ]
    """
```
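
If you don't need node-by-node control and only want the streamed text, pydantic-ai's higher-level `Agent.run_stream` API may be simpler. Here is a minimal sketch; the file name is illustrative, and it reuses `weather_agent` and `WeatherService` from `streaming.py` above:

```python {title="run_stream_example.py"}
import asyncio

from streaming import WeatherService, weather_agent


async def main():
    async with weather_agent.run_stream(
        'What will the weather be like in Paris on Tuesday?',
        deps=WeatherService(),
    ) as result:
        # print text deltas as they arrive, without handling graph
        # nodes or individual events ourselves
        async for text in result.stream_text(delta=True):
            print(text, end='')


if __name__ == '__main__':
    asyncio.run(main())
```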

---

### Additional Configuration
#### Usage Limits