Skip to content

Commit c7aaca9

Browse files
committed
Improve deferred tools metadata documentation example
- Replace contrived task_id parameter with realistic tool signatures
- Add ComputeDeps class demonstrating dependency injection pattern
- Show using ctx.deps to compute metadata from tool arguments
- Remove incorrect statement about backwards compatibility
- Update test_examples.py to match new realistic example
1 parent 42cf5b8 commit c7aaca9

File tree

2 files changed

+97
-57
lines changed

2 files changed

+97
-57
lines changed

docs/deferred-tools.md

Lines changed: 77 additions & 51 deletions
Original file line number | Diff line number | Diff line change
@@ -326,16 +326,20 @@ _(This example is complete, it can be run "as is" — you'll need to add `asynci
326326

327327
Both [`CallDeferred`][pydantic_ai.exceptions.CallDeferred] and [`ApprovalRequired`][pydantic_ai.exceptions.ApprovalRequired] exceptions accept an optional `metadata` parameter that allows you to attach arbitrary context information to deferred tool calls. This metadata is then available in the [`DeferredToolRequests.metadata`][pydantic_ai.tools.DeferredToolRequests.metadata] dictionary, keyed by the tool call ID.
328328

329+
A common pattern is to use [`RunContext`][pydantic_ai.tools.RunContext] to access application dependencies (databases, APIs, calculators) and compute metadata based on the tool arguments and current context. This allows you to provide rich information for approval decisions or external task tracking.
330+
329331
Common use cases for metadata include:
330332

331-
- Providing cost estimates or time estimates for approval decisions
332-
- Including task IDs or tracking information for external execution
333-
- Storing context about why approval is required
334-
- Attaching priority or urgency information
333+
- Computing cost estimates based on tool arguments and dependency services
334+
- Including job IDs or tracking information for external execution systems
335+
- Storing approval context like user permissions or resource availability
336+
- Attaching priority levels computed from current system state
335337

336-
Here's an example showing how to use metadata with both approval-required and external tools:
338+
Here's an example showing how to use metadata with deps to make informed approval decisions:
337339

338340
```python {title="deferred_tools_with_metadata.py"}
341+
from dataclasses import dataclass
342+
339343
from pydantic_ai import (
340344
Agent,
341345
ApprovalRequired,
@@ -347,91 +351,113 @@ from pydantic_ai import (
347351
ToolDenied,
348352
)
349353

350-
agent = Agent('openai:gpt-5', output_type=[str, DeferredToolRequests])
354+
355+
@dataclass
356+
class ComputeDeps:
357+
"""Dependencies providing cost estimation and job scheduling."""
358+
359+
def estimate_cost(self, dataset: str, model_type: str) -> float:
360+
# In real code, query pricing API or database
361+
costs = {'gpt-4': 50.0, 'gpt-3.5': 10.0}
362+
return costs.get(model_type, 25.0)
363+
364+
def estimate_duration(self, dataset: str) -> int:
365+
# In real code, estimate based on dataset size
366+
return 30 if dataset == 'large_dataset' else 5
367+
368+
def submit_job(self, dataset: str, model_type: str) -> str:
369+
# In real code, submit to batch processing system
370+
return f'job_{dataset}_{model_type}'
371+
372+
373+
agent = Agent(
374+
'openai:gpt-5',
375+
deps_type=ComputeDeps,
376+
output_type=[str, DeferredToolRequests],
377+
)
351378

352379

353380
@agent.tool
354-
def expensive_compute(ctx: RunContext, task_id: str) -> str:
381+
def train_model(ctx: RunContext[ComputeDeps], dataset: str, model_type: str) -> str:
382+
"""Train ML model - requires approval for expensive models."""
355383
if not ctx.tool_call_approved:
384+
# Use deps to compute actual estimates based on args
385+
cost = ctx.deps.estimate_cost(dataset, model_type)
386+
duration = ctx.deps.estimate_duration(dataset)
387+
356388
raise ApprovalRequired(
357389
metadata={
358-
'task_id': task_id,
359-
'estimated_cost_usd': 25.50,
360-
'estimated_time_minutes': 15,
361-
'reason': 'High compute cost',
390+
'dataset': dataset,
391+
'model_type': model_type,
392+
'estimated_cost_usd': cost,
393+
'estimated_duration_minutes': duration,
362394
}
363395
)
364-
return f'Task {task_id} completed'
396+
397+
return f'Model {model_type} trained on {dataset}'
365398

366399

367400
@agent.tool
368-
async def external_api_call(ctx: RunContext, endpoint: str) -> str:
369-
# Schedule the external API call and defer execution
370-
task_id = f'api_call_{ctx.tool_call_id}'
401+
def process_dataset(ctx: RunContext[ComputeDeps], dataset: str, operation: str) -> str:
402+
"""Process dataset in external batch system."""
403+
# Submit job and defer execution
404+
job_id = ctx.deps.submit_job(dataset, operation)
371405

372406
raise CallDeferred(
373407
metadata={
374-
'task_id': task_id,
375-
'endpoint': endpoint,
376-
'priority': 'high',
408+
'job_id': job_id,
409+
'dataset': dataset,
410+
'operation': operation,
377411
}
378412
)
379413

380414

381-
result = agent.run_sync('Run expensive task-123 and call the /data endpoint')
415+
deps = ComputeDeps()
416+
result = agent.run_sync(
417+
'Train gpt-4 on large_dataset and process large_dataset with transform',
418+
deps=deps,
419+
)
382420
messages = result.all_messages()
383421

384422
assert isinstance(result.output, DeferredToolRequests)
385423
requests = result.output
386424

387-
# Handle approvals with metadata
388-
for call in requests.approvals:
389-
metadata = requests.metadata.get(call.tool_call_id, {})
390-
print(f'Approval needed for {call.tool_name}')
391-
#> Approval needed for expensive_compute
392-
print(f' Cost: ${metadata.get("estimated_cost_usd")}')
393-
#> Cost: $25.5
394-
print(f' Time: {metadata.get("estimated_time_minutes")} minutes')
395-
#> Time: 15 minutes
396-
print(f' Reason: {metadata.get("reason")}')
397-
#> Reason: High compute cost
398-
399-
# Handle external calls with metadata
400-
for call in requests.calls:
401-
metadata = requests.metadata.get(call.tool_call_id, {})
402-
print(f'External call to {call.tool_name}')
403-
#> External call to external_api_call
404-
print(f' Task ID: {metadata.get("task_id")}')
405-
#> Task ID: api_call_external_api_call
406-
print(f' Priority: {metadata.get("priority")}')
407-
#> Priority: high
408-
409-
# Build results with approvals and external results
425+
# Make approval decisions based on metadata
410426
results = DeferredToolResults()
411427
for call in requests.approvals:
412428
metadata = requests.metadata.get(call.tool_call_id, {})
413429
cost = metadata.get('estimated_cost_usd', 0)
414430

415-
if cost < 50: # Approve if cost is under $50
431+
print(f'Approval needed: {call.tool_name}')
432+
#> Approval needed: train_model
433+
print(f' Model: {metadata.get("model_type")}, Cost: ${cost}')
434+
#> Model: gpt-4, Cost: $50.0
435+
436+
if cost < 100:
416437
results.approvals[call.tool_call_id] = ToolApproved()
417438
else:
418-
results.approvals[call.tool_call_id] = ToolDenied('Cost too high')
439+
results.approvals[call.tool_call_id] = ToolDenied('Cost exceeds limit')
419440

441+
# Process external jobs using metadata
420442
for call in requests.calls:
421443
metadata = requests.metadata.get(call.tool_call_id, {})
422-
# Simulate getting result from external task
423-
task_id = metadata.get('task_id')
424-
results.calls[call.tool_call_id] = f'Result from {task_id}: success'
444+
job_id = metadata.get('job_id')
425445

426-
result = agent.run_sync(message_history=messages, deferred_tool_results=results)
446+
print(f'External job: {job_id}')
447+
#> External job: job_large_dataset_transform
448+
449+
# In real code, poll job status and get result
450+
results.calls[call.tool_call_id] = f'Completed {job_id}'
451+
452+
result = agent.run_sync(message_history=messages, deferred_tool_results=results, deps=deps)
427453
print(result.output)
428-
#> I completed task-123 and retrieved data from the /data endpoint.
454+
"""
455+
Model gpt-4 trained on large_dataset and dataset processing job job_large_dataset_transform completed
456+
"""
429457
```
430458

431459
_(This example is complete, it can be run "as is")_
432460

433-
The metadata dictionary can contain any JSON-serializable values and is entirely application-defined. If no metadata is provided when raising the exception, the tool call ID will still be present in the `metadata` dictionary with an empty dict as the value for backward compatibility.
434-
435461
## See Also
436462

437463
- [Function Tools](tools.md) - Basic tool concepts and registration

tests/test_examples.py

Lines changed: 20 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -523,9 +523,17 @@ async def call_tool(
523523
'Tell me about the pydantic/pydantic-ai repo.': 'The pydantic/pydantic-ai repo is a Python agent framework for building Generative AI applications.',
524524
'What do I have on my calendar today?': "You're going to spend all day playing with Pydantic AI.",
525525
'Write a long story about a cat': 'Once upon a time, there was a curious cat named Whiskers who loved to explore the world around him...',
526-
'Run expensive task-123 and call the /data endpoint': [
527-
ToolCallPart(tool_name='expensive_compute', args={'task_id': 'task-123'}, tool_call_id='expensive_compute'),
528-
ToolCallPart(tool_name='external_api_call', args={'endpoint': '/data'}, tool_call_id='external_api_call'),
526+
'Train gpt-4 on large_dataset and process large_dataset with transform': [
527+
ToolCallPart(
528+
tool_name='train_model',
529+
args={'dataset': 'large_dataset', 'model_type': 'gpt-4'},
530+
tool_call_id='train_model',
531+
),
532+
ToolCallPart(
533+
tool_name='process_dataset',
534+
args={'dataset': 'large_dataset', 'operation': 'transform'},
535+
tool_call_id='process_dataset',
536+
),
529537
],
530538
}
531539

@@ -875,11 +883,17 @@ async def model_logic( # noqa: C901
875883
return ModelResponse(
876884
parts=[TextPart('The answer to the ultimate question of life, the universe, and everything is 42.')]
877885
)
878-
elif isinstance(m, ToolReturnPart) and m.tool_name in ('expensive_compute', 'external_api_call'):
886+
elif isinstance(m, ToolReturnPart) and m.tool_name in ('train_model', 'process_dataset'):
879887
# After deferred tools complete, check if we have all results to provide final response
880888
tool_names = {part.tool_name for msg in messages for part in msg.parts if isinstance(part, ToolReturnPart)}
881-
if 'expensive_compute' in tool_names and 'external_api_call' in tool_names:
882-
return ModelResponse(parts=[TextPart('I completed task-123 and retrieved data from the /data endpoint.')])
889+
if 'train_model' in tool_names and 'process_dataset' in tool_names:
890+
return ModelResponse(
891+
parts=[
892+
TextPart(
893+
'Model gpt-4 trained on large_dataset and dataset processing job job_large_dataset_transform completed'
894+
)
895+
]
896+
)
883897
# If we don't have both results yet, just acknowledge the tool result
884898
return ModelResponse(parts=[TextPart(f'Received result from {m.tool_name}')])
885899

0 commit comments

Comments (0)