Skip to content

Commit f6eec04

Browse files
authored
Merge branch 'main' into bedrock_error
2 parents 5a68e01 + faca9c4 commit f6eec04

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

53 files changed

+1970
-746
lines changed

docs/api/models/function.md

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,14 @@ async def model_function(
3636
print(info)
3737
"""
3838
AgentInfo(
39-
function_tools=[], allow_text_output=True, output_tools=[], model_settings=None
39+
function_tools=[],
40+
allow_text_output=True,
41+
output_tools=[],
42+
model_settings=None,
43+
model_request_parameters=ModelRequestParameters(
44+
function_tools=[], builtin_tools=[], output_tools=[]
45+
),
46+
instructions=None,
4047
)
4148
"""
4249
return ModelResponse(parts=[TextPart('hello world')])

docs/builtin-tools.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ making it ideal for queries that require up-to-date data.
3131
|----------|-----------|-------|
3232
| OpenAI Responses | ✅ | Full feature support. To include search results on the [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] that's available via [`ModelResponse.builtin_tool_calls`][pydantic_ai.messages.ModelResponse.builtin_tool_calls], enable the [`OpenAIResponsesModelSettings.openai_include_web_search_sources`][pydantic_ai.models.openai.OpenAIResponsesModelSettings.openai_include_web_search_sources] [model setting](agents.md#model-run-settings). |
3333
| Anthropic | ✅ | Full feature support |
34-
| Google | ✅ | No parameter support. No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is generated when streaming. Using built-in tools and user tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
34+
| Google | ✅ | No parameter support. No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is generated when streaming. Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
3535
| Groq | ✅ | Limited parameter support. To use web search capabilities with Groq, you need to use the [compound models](https://console.groq.com/docs/compound). |
3636
| OpenAI Chat Completions | ❌ | Not supported |
3737
| Bedrock | ❌ | Not supported |
@@ -123,7 +123,7 @@ in a secure environment, making it perfect for computational tasks, data analysi
123123
| Provider | Supported | Notes |
124124
|----------|-----------|-------|
125125
| OpenAI | ✅ | To include code execution output on the [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] that's available via [`ModelResponse.builtin_tool_calls`][pydantic_ai.messages.ModelResponse.builtin_tool_calls], enable the [`OpenAIResponsesModelSettings.openai_include_code_execution_outputs`][pydantic_ai.models.openai.OpenAIResponsesModelSettings.openai_include_code_execution_outputs] [model setting](agents.md#model-run-settings). If the code execution generated images, like charts, they will be available on [`ModelResponse.images`][pydantic_ai.messages.ModelResponse.images] as [`BinaryImage`][pydantic_ai.messages.BinaryImage] objects. The generated image can also be used as [image output](output.md#image-output) for the agent run. |
126-
| Google | ✅ | Using built-in tools and user tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
126+
| Google | ✅ | Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
127127
| Anthropic || |
128128
| Groq || |
129129
| Bedrock || |
@@ -315,7 +315,7 @@ allowing it to pull up-to-date information from the web.
315315

316316
| Provider | Supported | Notes |
317317
|----------|-----------|-------|
318-
| Google | ✅ | No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is currently generated; please submit an issue if you need this. Using built-in tools and user tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
318+
| Google | ✅ | No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is currently generated; please submit an issue if you need this. Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
319319
| OpenAI || |
320320
| Anthropic || |
321321
| Groq || |

docs/deferred-tools.md

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -142,18 +142,18 @@ print(result.all_messages())
142142
),
143143
ModelRequest(
144144
parts=[
145-
ToolReturnPart(
146-
tool_name='delete_file',
147-
content='Deleting files is not allowed',
148-
tool_call_id='delete_file',
149-
timestamp=datetime.datetime(...),
150-
),
151145
ToolReturnPart(
152146
tool_name='update_file',
153147
content="File '.env' updated: ''",
154148
tool_call_id='update_file_dotenv',
155149
timestamp=datetime.datetime(...),
156150
),
151+
ToolReturnPart(
152+
tool_name='delete_file',
153+
content='Deleting files is not allowed',
154+
tool_call_id='delete_file',
155+
timestamp=datetime.datetime(...),
156+
),
157157
]
158158
),
159159
ModelResponse(

docs/logfire.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -263,6 +263,7 @@ The following providers have dedicated documentation on Pydantic AI:
263263
- [Agenta](https://docs.agenta.ai/observability/integrations/pydanticai)
264264
- [Confident AI](https://documentation.confident-ai.com/docs/llm-tracing/integrations/pydanticai)
265265
- [LangWatch](https://docs.langwatch.ai/integration/python/integrations/pydantic-ai)
266+
- [Braintrust](https://www.braintrust.dev/docs/integrations/sdk-integrations/pydantic-ai)
266267

267268
## Advanced usage
268269

docs/models/overview.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,12 @@ You can use [`FallbackModel`][pydantic_ai.models.fallback.FallbackModel] to atte
8686
in sequence until one successfully returns a result. Under the hood, Pydantic AI automatically switches
8787
from one model to the next if the current model returns a 4xx or 5xx status code.
8888

89+
!!! note
90+
91+
The provider SDKs on which Models are based (like OpenAI, Anthropic, etc.) often have built-in retry logic that can delay the `FallbackModel` from activating.
92+
93+
When using `FallbackModel`, it's recommended to disable provider SDK retries to ensure immediate fallback, for example by setting `max_retries=0` on a [custom OpenAI client](openai.md#custom-openai-client).
94+
8995
In the following example, the agent first makes a request to the OpenAI model (which fails due to an invalid API key),
9096
and then falls back to the Anthropic model.
9197

examples/pydantic_ai_examples/rag.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ async def build_search_db():
115115
async with httpx.AsyncClient() as client:
116116
response = await client.get(DOCS_JSON)
117117
response.raise_for_status()
118-
sections = sessions_ta.validate_json(response.content)
118+
sections = sections_ta.validate_json(response.content)
119119

120120
openai = AsyncOpenAI()
121121
logfire.instrument_openai(openai)
@@ -183,7 +183,7 @@ def embedding_content(self) -> str:
183183
return '\n\n'.join((f'path: {self.path}', f'title: {self.title}', self.content))
184184

185185

186-
sessions_ta = TypeAdapter(list[DocsSection])
186+
sections_ta = TypeAdapter(list[DocsSection])
187187

188188

189189
# pyright: reportUnknownMemberType=false

pydantic_ai_slim/pydantic_ai/_agent_graph.py

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -374,9 +374,10 @@ async def _prepare_request_parameters(
374374
) -> models.ModelRequestParameters:
375375
"""Build tools and create an agent model."""
376376
output_schema = ctx.deps.output_schema
377-
output_object = None
378-
if isinstance(output_schema, _output.NativeOutputSchema):
379-
output_object = output_schema.object_def
377+
378+
prompted_output_template = (
379+
output_schema.template if isinstance(output_schema, _output.PromptedOutputSchema) else None
380+
)
380381

381382
function_tools: list[ToolDefinition] = []
382383
output_tools: list[ToolDefinition] = []
@@ -391,7 +392,8 @@ async def _prepare_request_parameters(
391392
builtin_tools=ctx.deps.builtin_tools,
392393
output_mode=output_schema.mode,
393394
output_tools=output_tools,
394-
output_object=output_object,
395+
output_object=output_schema.object_def,
396+
prompted_output_template=prompted_output_template,
395397
allow_text_output=output_schema.allows_text,
396398
allow_image_output=output_schema.allows_image,
397399
)
@@ -489,7 +491,6 @@ async def _prepare_request(
489491
message_history = _clean_message_history(message_history)
490492

491493
model_request_parameters = await _prepare_request_parameters(ctx)
492-
model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
493494

494495
model_settings = ctx.deps.model_settings
495496
usage = ctx.state.usage
@@ -570,7 +571,7 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
570571
# we got an empty response.
571572
# this sometimes happens with anthropic (and perhaps other models)
572573
# when the model has already returned text along side tool calls
573-
if text_processor := output_schema.text_processor:
574+
if text_processor := output_schema.text_processor: # pragma: no branch
574575
# in this scenario, if text responses are allowed, we return text from the most recent model
575576
# response, if any
576577
for message in reversed(ctx.state.message_history):
@@ -584,8 +585,12 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
584585
# not part of the final result output, so we reset the accumulated text
585586
text = '' # pragma: no cover
586587
if text:
587-
self._next_node = await self._handle_text_response(ctx, text, text_processor)
588-
return
588+
try:
589+
self._next_node = await self._handle_text_response(ctx, text, text_processor)
590+
return
591+
except ToolRetryError:
592+
# If the text from the preview response was invalid, ignore it.
593+
pass
589594

590595
# Go back to the model request node with an empty request, which means we'll essentially
591596
# resubmit the most recent request that resulted in an empty response,
@@ -622,11 +627,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
622627
else:
623628
assert_never(part)
624629

625-
# At the moment, we prioritize at least executing tool calls if they are present.
626-
# In the future, we'd consider making this configurable at the agent or run level.
627-
# This accounts for cases like anthropic returns that might contain a text response
628-
# and a tool call response, where the text response just indicates the tool call will happen.
629630
try:
631+
# At the moment, we prioritize at least executing tool calls if they are present.
632+
# In the future, we'd consider making this configurable at the agent or run level.
633+
# This accounts for cases like anthropic returns that might contain a text response
634+
# and a tool call response, where the text response just indicates the tool call will happen.
630635
alternatives: list[str] = []
631636
if tool_calls:
632637
async for event in self._handle_tool_calls(ctx, tool_calls):
@@ -770,7 +775,6 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT
770775
if ctx.deps.instrumentation_settings
771776
else DEFAULT_INSTRUMENTATION_VERSION,
772777
run_step=ctx.state.run_step,
773-
tool_call_approved=ctx.state.run_step == 0,
774778
)
775779

776780

@@ -1034,7 +1038,7 @@ async def _call_tool(
10341038
elif isinstance(tool_call_result, ToolApproved):
10351039
if tool_call_result.override_args is not None:
10361040
tool_call = dataclasses.replace(tool_call, args=tool_call_result.override_args)
1037-
tool_result = await tool_manager.handle_call(tool_call)
1041+
tool_result = await tool_manager.handle_call(tool_call, approved=True)
10381042
elif isinstance(tool_call_result, ToolDenied):
10391043
return _messages.ToolReturnPart(
10401044
tool_name=tool_call.tool_name,

0 commit comments

Comments
 (0)