
Commit 69be302

Merge branch 'main' into custom-events
# Conflicts:
#   tests/test_agent.py

2 parents 9725ec2 + 4cc4f35 · commit 69be302

35 files changed: +763 -516 lines

docs/api/models/function.md
Lines changed: 8 additions & 1 deletion

@@ -36,7 +36,14 @@ async def model_function(
     print(info)
     """
     AgentInfo(
-        function_tools=[], allow_text_output=True, output_tools=[], model_settings=None
+        function_tools=[],
+        allow_text_output=True,
+        output_tools=[],
+        model_settings=None,
+        model_request_parameters=ModelRequestParameters(
+            function_tools=[], builtin_tools=[], output_tools=[]
+        ),
+        instructions=None,
     )
     """
     return ModelResponse(parts=[TextPart('hello world')])
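For reference, a minimal sketch (not part of this commit) of how a function with this signature is plugged into an agent via `FunctionModel`; the prompt and printed output below are illustrative assumptions:

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
    from pydantic_ai.models.function import AgentInfo, FunctionModel

    async def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        # `info` is the AgentInfo shown in the updated docstring above, now including
        # `model_request_parameters` and `instructions`.
        print(info)
        return ModelResponse(parts=[TextPart('hello world')])

    agent = Agent(FunctionModel(model_function))
    result = agent.run_sync('Say hello')
    print(result.output)
    #> hello world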

docs/builtin-tools.md
Lines changed: 3 additions & 3 deletions

@@ -31,7 +31,7 @@ making it ideal for queries that require up-to-date data.
 |----------|-----------|-------|
 | OpenAI Responses | ✅ | Full feature support. To include search results on the [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] that's available via [`ModelResponse.builtin_tool_calls`][pydantic_ai.messages.ModelResponse.builtin_tool_calls], enable the [`OpenAIResponsesModelSettings.openai_include_web_search_sources`][pydantic_ai.models.openai.OpenAIResponsesModelSettings.openai_include_web_search_sources] [model setting](agents.md#model-run-settings). |
 | Anthropic | ✅ | Full feature support |
-| Google | ✅ | No parameter support. No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is generated when streaming. Using built-in tools and user tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
+| Google | ✅ | No parameter support. No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is generated when streaming. Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
 | Groq | ✅ | Limited parameter support. To use web search capabilities with Groq, you need to use the [compound models](https://console.groq.com/docs/compound). |
 | OpenAI Chat Completions | ❌ | Not supported |
 | Bedrock | ❌ | Not supported |

@@ -123,7 +123,7 @@ in a secure environment, making it perfect for computational tasks, data analysis
 | Provider | Supported | Notes |
 |----------|-----------|-------|
 | OpenAI | ✅ | To include code execution output on the [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] that's available via [`ModelResponse.builtin_tool_calls`][pydantic_ai.messages.ModelResponse.builtin_tool_calls], enable the [`OpenAIResponsesModelSettings.openai_include_code_execution_outputs`][pydantic_ai.models.openai.OpenAIResponsesModelSettings.openai_include_code_execution_outputs] [model setting](agents.md#model-run-settings). If the code execution generated images, like charts, they will be available on [`ModelResponse.images`][pydantic_ai.messages.ModelResponse.images] as [`BinaryImage`][pydantic_ai.messages.BinaryImage] objects. The generated image can also be used as [image output](output.md#image-output) for the agent run. |
-| Google | ✅ | Using built-in tools and user tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
+| Google | ✅ | Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
 | Anthropic || |
 | Groq || |
 | Bedrock || |

@@ -315,7 +315,7 @@ allowing it to pull up-to-date information from the web.
 
 | Provider | Supported | Notes |
 |----------|-----------|-------|
-| Google | ✅ | No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is currently generated; please submit an issue if you need this. Using built-in tools and user tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
+| Google | ✅ | No [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] or [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] is currently generated; please submit an issue if you need this. Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
 | OpenAI || |
 | Anthropic || |
 | Groq || |
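All three Google rows above point at the same workaround: built-in tools cannot be combined with function tools (including output tools) on Google models, so structured output has to go through `PromptedOutput`. A minimal sketch of that setup; the model name, output type, and prompt are illustrative assumptions, not part of this commit:

    from pydantic import BaseModel

    from pydantic_ai import Agent, PromptedOutput, WebSearchTool

    class Answer(BaseModel):
        summary: str

    agent = Agent(
        'google-gla:gemini-2.5-flash',
        builtin_tools=[WebSearchTool()],
        # PromptedOutput asks the model for JSON via instructions rather than an
        # output tool, so it can coexist with Google's built-in tools.
        output_type=PromptedOutput(Answer),
    )
    result = agent.run_sync('Find one recent Pydantic AI release and summarize it.')
    print(result.output.summary)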

pydantic_ai_slim/pydantic_ai/_agent_graph.py
Lines changed: 17 additions & 12 deletions

@@ -375,9 +375,10 @@ async def _prepare_request_parameters(
 ) -> models.ModelRequestParameters:
     """Build tools and create an agent model."""
     output_schema = ctx.deps.output_schema
-    output_object = None
-    if isinstance(output_schema, _output.NativeOutputSchema):
-        output_object = output_schema.object_def
+
+    prompted_output_template = (
+        output_schema.template if isinstance(output_schema, _output.PromptedOutputSchema) else None
+    )
 
     function_tools: list[ToolDefinition] = []
     output_tools: list[ToolDefinition] = []

@@ -392,7 +393,8 @@ async def _prepare_request_parameters(
         builtin_tools=ctx.deps.builtin_tools,
         output_mode=output_schema.mode,
         output_tools=output_tools,
-        output_object=output_object,
+        output_object=output_schema.object_def,
+        prompted_output_template=prompted_output_template,
         allow_text_output=output_schema.allows_text,
         allow_image_output=output_schema.allows_image,
     )

@@ -490,7 +492,6 @@ async def _prepare_request(
         message_history = _clean_message_history(message_history)
 
         model_request_parameters = await _prepare_request_parameters(ctx)
-        model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
 
         model_settings = ctx.deps.model_settings
         usage = ctx.state.usage

@@ -571,7 +572,7 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa
                 # we got an empty response.
                 # this sometimes happens with anthropic (and perhaps other models)
                 # when the model has already returned text along side tool calls
-                if text_processor := output_schema.text_processor:
+                if text_processor := output_schema.text_processor:  # pragma: no branch
                     # in this scenario, if text responses are allowed, we return text from the most recent model
                     # response, if any
                     for message in reversed(ctx.state.message_history):

@@ -585,8 +586,12 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa
                                     # not part of the final result output, so we reset the accumulated text
                                     text = ''  # pragma: no cover
                             if text:
-                                self._next_node = await self._handle_text_response(ctx, text, text_processor)
-                                return
+                                try:
+                                    self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                                    return
+                                except ToolRetryError:
+                                    # If the text from the preview response was invalid, ignore it.
+                                    pass
 
                 # Go back to the model request node with an empty request, which means we'll essentially
                 # resubmit the most recent request that resulted in an empty response,

@@ -623,11 +628,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa
                 else:
                     assert_never(part)
 
-            # At the moment, we prioritize at least executing tool calls if they are present.
-            # In the future, we'd consider making this configurable at the agent or run level.
-            # This accounts for cases like anthropic returns that might contain a text response
-            # and a tool call response, where the text response just indicates the tool call will happen.
             try:
+                # At the moment, we prioritize at least executing tool calls if they are present.
+                # In the future, we'd consider making this configurable at the agent or run level.
+                # This accounts for cases like anthropic returns that might contain a text response
+                # and a tool call response, where the text response just indicates the tool call will happen.
                 alternatives: list[str] = []
                 if tool_calls:
                     async for event in self._handle_tool_calls(ctx, tool_calls):

pydantic_ai_slim/pydantic_ai/_output.py
Lines changed: 20 additions & 105 deletions

@@ -10,7 +10,7 @@
 
 from pydantic import Json, TypeAdapter, ValidationError
 from pydantic_core import SchemaValidator, to_json
-from typing_extensions import Self, TypedDict, TypeVar, assert_never
+from typing_extensions import Self, TypedDict, TypeVar
 
 from pydantic_ai._instrumentation import InstrumentationNames
 

@@ -26,7 +26,6 @@
     OutputSpec,
     OutputTypeOrFunction,
     PromptedOutput,
-    StructuredOutputMode,
     TextOutput,
     TextOutputFunc,
     ToolOutput,

@@ -36,7 +35,7 @@
 from .toolsets.abstract import AbstractToolset, ToolsetTool
 
 if TYPE_CHECKING:
-    from .profiles import ModelProfile
+    pass
 
 T = TypeVar('T')
 """An invariant TypeVar."""

@@ -214,59 +213,30 @@ async def validate(
 
 
 @dataclass(kw_only=True)
-class BaseOutputSchema(ABC, Generic[OutputDataT]):
+class OutputSchema(ABC, Generic[OutputDataT]):
     text_processor: BaseOutputProcessor[OutputDataT] | None = None
     toolset: OutputToolset[Any] | None = None
+    object_def: OutputObjectDefinition | None = None
     allows_deferred_tools: bool = False
     allows_image: bool = False
 
-    @abstractmethod
-    def with_default_mode(self, mode: StructuredOutputMode) -> OutputSchema[OutputDataT]:
+    @property
+    def mode(self) -> OutputMode:
         raise NotImplementedError()
 
     @property
     def allows_text(self) -> bool:
         return self.text_processor is not None
 
-
-@dataclass(init=False)
-class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
-    """Model the final output from an agent run."""
-
-    @classmethod
-    @overload
-    def build(
-        cls,
-        output_spec: OutputSpec[OutputDataT],
-        *,
-        default_mode: StructuredOutputMode,
-        name: str | None = None,
-        description: str | None = None,
-        strict: bool | None = None,
-    ) -> OutputSchema[OutputDataT]: ...
-
-    @classmethod
-    @overload
-    def build(
-        cls,
-        output_spec: OutputSpec[OutputDataT],
-        *,
-        default_mode: None = None,
-        name: str | None = None,
-        description: str | None = None,
-        strict: bool | None = None,
-    ) -> BaseOutputSchema[OutputDataT]: ...
-
     @classmethod
     def build(  # noqa: C901
         cls,
         output_spec: OutputSpec[OutputDataT],
         *,
-        default_mode: StructuredOutputMode | None = None,
         name: str | None = None,
         description: str | None = None,
         strict: bool | None = None,
-    ) -> BaseOutputSchema[OutputDataT]:
+    ) -> OutputSchema[OutputDataT]:
         """Build an OutputSchema dataclass from an output type."""
         outputs = _flatten_output_spec(output_spec)
 

@@ -384,15 +354,12 @@ def build(  # noqa: C901
             )
 
         if len(other_outputs) > 0:
-            schema = OutputSchemaWithoutMode(
+            return AutoOutputSchema(
                 processor=cls._build_processor(other_outputs, name=name, description=description, strict=strict),
                 toolset=toolset,
                 allows_deferred_tools=allows_deferred_tools,
                 allows_image=allows_image,
             )
-            if default_mode:
-                schema = schema.with_default_mode(default_mode)
-            return schema
 
         if allows_image:
             return ImageOutputSchema(allows_deferred_tools=allows_deferred_tools)

@@ -412,22 +379,9 @@ def _build_processor(
 
         return UnionOutputProcessor(outputs=outputs, strict=strict, name=name, description=description)
 
-    @property
-    @abstractmethod
-    def mode(self) -> OutputMode:
-        raise NotImplementedError()
-
-    def raise_if_unsupported(self, profile: ModelProfile) -> None:
-        """Raise an error if the mode is not supported by this model."""
-        if self.allows_image and not profile.supports_image_output:
-            raise UserError('Image output is not supported by this model.')
-
-    def with_default_mode(self, mode: StructuredOutputMode) -> OutputSchema[OutputDataT]:
-        return self
-
 
 @dataclass(init=False)
-class OutputSchemaWithoutMode(BaseOutputSchema[OutputDataT]):
+class AutoOutputSchema(OutputSchema[OutputDataT]):
     processor: BaseObjectOutputProcessor[OutputDataT]
 
     def __init__(

@@ -441,32 +395,17 @@ def __init__(
         # At that point we may not know yet what output mode we're going to use if no model was provided or it was deferred until agent.run time,
         # but we cover ourselves just in case we end up using the tool output mode.
         super().__init__(
-            allows_deferred_tools=allows_deferred_tools,
             toolset=toolset,
+            object_def=processor.object_def,
             text_processor=processor,
+            allows_deferred_tools=allows_deferred_tools,
             allows_image=allows_image,
         )
         self.processor = processor
 
-    def with_default_mode(self, mode: StructuredOutputMode) -> OutputSchema[OutputDataT]:
-        if mode == 'native':
-            return NativeOutputSchema(
-                processor=self.processor,
-                allows_deferred_tools=self.allows_deferred_tools,
-                allows_image=self.allows_image,
-            )
-        elif mode == 'prompted':
-            return PromptedOutputSchema(
-                processor=self.processor,
-                allows_deferred_tools=self.allows_deferred_tools,
-                allows_image=self.allows_image,
-            )
-        elif mode == 'tool':
-            return ToolOutputSchema(
-                toolset=self.toolset, allows_deferred_tools=self.allows_deferred_tools, allows_image=self.allows_image
-            )
-        else:
-            assert_never(mode)
+    @property
+    def mode(self) -> OutputMode:
+        return 'auto'
 
 
 @dataclass(init=False)

@@ -488,10 +427,6 @@ def __init__(
     def mode(self) -> OutputMode:
         return 'text'
 
-    def raise_if_unsupported(self, profile: ModelProfile) -> None:
-        """Raise an error if the mode is not supported by this model."""
-        super().raise_if_unsupported(profile)
-
 
 class ImageOutputSchema(OutputSchema[OutputDataT]):
     def __init__(self, *, allows_deferred_tools: bool):

@@ -501,11 +436,6 @@ def __init__(self, *, allows_deferred_tools: bool):
     def mode(self) -> OutputMode:
         return 'image'
 
-    def raise_if_unsupported(self, profile: ModelProfile) -> None:
-        """Raise an error if the mode is not supported by this model."""
-        # This already raises if image output is not supported by this model.
-        super().raise_if_unsupported(profile)
-
 
 @dataclass(init=False)
 class StructuredTextOutputSchema(OutputSchema[OutputDataT], ABC):

@@ -515,25 +445,19 @@ def __init__(
         self, *, processor: BaseObjectOutputProcessor[OutputDataT], allows_deferred_tools: bool, allows_image: bool
     ):
         super().__init__(
-            text_processor=processor, allows_deferred_tools=allows_deferred_tools, allows_image=allows_image
+            text_processor=processor,
+            object_def=processor.object_def,
+            allows_deferred_tools=allows_deferred_tools,
+            allows_image=allows_image,
         )
         self.processor = processor
 
-    @property
-    def object_def(self) -> OutputObjectDefinition:
-        return self.processor.object_def
-
 
 class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
     @property
     def mode(self) -> OutputMode:
         return 'native'
 
-    def raise_if_unsupported(self, profile: ModelProfile) -> None:
-        """Raise an error if the mode is not supported by this model."""
-        if not profile.supports_json_schema_output:
-            raise UserError('Native structured output is not supported by this model.')
-
 
 @dataclass(init=False)
 class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):

@@ -572,14 +496,11 @@ def build_instructions(cls, template: str, object_def: OutputObjectDefinition) -
 
         return template.format(schema=json.dumps(schema))
 
-    def raise_if_unsupported(self, profile: ModelProfile) -> None:
-        """Raise an error if the mode is not supported by this model."""
-        super().raise_if_unsupported(profile)
-
-    def instructions(self, default_template: str) -> str:
+    def instructions(self, default_template: str) -> str:  # pragma: no cover
         """Get instructions to tell model to output JSON matching the schema."""
         template = self.template or default_template
         object_def = self.object_def
+        assert object_def is not None
         return self.build_instructions(template, object_def)
 
 

@@ -604,12 +525,6 @@ def __init__(
     def mode(self) -> OutputMode:
         return 'tool'
 
-    def raise_if_unsupported(self, profile: ModelProfile) -> None:
-        """Raise an error if the mode is not supported by this model."""
-        super().raise_if_unsupported(profile)
-        if not profile.supports_tools:
-            raise UserError('Tool output is not supported by this model.')
-
 
 class BaseOutputProcessor(ABC, Generic[OutputDataT]):
     @abstractmethod
