Skip to content

Commit 1578993

Browse files
committed
add model-based support and add tests
1 parent 41598ec commit 1578993

17 files changed

+1371
-43
lines changed

pydantic_ai_slim/pydantic_ai/models/__init__.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -377,7 +377,10 @@ async def request(
377377
model_settings: ModelSettings | None,
378378
model_request_parameters: ModelRequestParameters,
379379
) -> ModelResponse:
380-
"""Make a request to the model."""
380+
"""Make a request to the model.
381+
382+
This is ultimately called by `pydantic_ai._agent_graph.ModelRequestNode._make_request(...)`.
383+
"""
381384
raise NotImplementedError()
382385

383386
async def count_tokens(

pydantic_ai_slim/pydantic_ai/models/anthropic.py

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -199,8 +199,9 @@ def __init__(
199199
model_name: The name of the Anthropic model to use. List of model names available
200200
[here](https://docs.anthropic.com/en/docs/about-claude/models).
201201
provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
202-
instance of `Provider[AsyncAnthropicClient]`. If not provided, the other parameters will be used.
202+
instance of `Provider[AsyncAnthropicClient]`. Defaults to 'anthropic'.
203203
profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
204+
The default 'anthropic' provider will use the default `..profiles.anthropic_model_profile`.
204205
settings: Default model settings for this model instance.
205206
"""
206207
self._model_name = model_name
@@ -290,13 +291,14 @@ def prepare_request(
290291
and thinking.get('type') == 'enabled'
291292
):
292293
if model_request_parameters.output_mode == 'auto':
293-
model_request_parameters = replace(model_request_parameters, output_mode='prompted')
294+
output_mode = 'native' if self.profile.supports_json_schema_output else 'prompted'
295+
model_request_parameters = replace(model_request_parameters, output_mode=output_mode)
294296
elif (
295297
model_request_parameters.output_mode == 'tool' and not model_request_parameters.allow_text_output
296298
): # pragma: no branch
297299
# This would result in `tool_choice=required`, which Anthropic does not support with thinking.
298300
raise UserError(
299-
'Anthropic does not support thinking and output tools at the same time. Use `output_type=PromptedOutput(...)` instead.'
301+
'Anthropic does not support thinking and output tools at the same time. Use `output_type=NativeOutput(...)` instead.'
300302
)
301303
return super().prepare_request(model_settings, model_request_parameters)
302304

@@ -330,15 +332,15 @@ async def _messages_create(
330332
# standalone function to make it easier to override
331333
tools, strict_tools_requested = self._get_tools(model_request_parameters, model_settings)
332334
tools, mcp_servers, beta_features = self._add_builtin_tools(tools, model_request_parameters)
333-
output_format = self._build_output_format(model_request_parameters)
335+
native_format = self._native_output_format(model_request_parameters)
334336

335337
tool_choice = self._infer_tool_choice(tools, model_settings, model_request_parameters)
336338

337339
system_prompt, anthropic_messages = await self._map_message(messages, model_request_parameters, model_settings)
338340

339341
# Build betas list for SDK
340342
betas: list[str] = list(beta_features)
341-
if strict_tools_requested or output_format:
343+
if strict_tools_requested or native_format:
342344
betas.append('structured-outputs-2025-11-13')
343345

344346
try:
@@ -354,7 +356,7 @@ async def _messages_create(
354356
tools=tools or OMIT,
355357
tool_choice=tool_choice or OMIT,
356358
mcp_servers=mcp_servers or OMIT,
357-
output_format=output_format or OMIT,
359+
output_format=native_format or OMIT,
358360
betas=betas or OMIT,
359361
stream=stream,
360362
thinking=model_settings.get('anthropic_thinking', OMIT),
@@ -849,19 +851,18 @@ async def _map_user_prompt(
849851
else:
850852
raise RuntimeError(f'Unsupported content type: {type(item)}') # pragma: no cover
851853

852-
@staticmethod
853-
def _map_tool_definition(f: ToolDefinition) -> BetaToolParam:
854+
def _map_tool_definition(self, f: ToolDefinition) -> BetaToolParam:
854855
tool_param: BetaToolParam = {
855856
'name': f.name,
856857
'description': f.description or '',
857858
'input_schema': f.parameters_json_schema,
858859
}
859-
if f.strict:
860+
if f.strict and self.profile.supports_json_schema_output: # pragma: no branch
860861
tool_param['strict'] = f.strict
861862
return tool_param
862863

863864
@staticmethod
864-
def _build_output_format(model_request_parameters: ModelRequestParameters) -> BetaJSONOutputFormatParam | None:
865+
def _native_output_format(model_request_parameters: ModelRequestParameters) -> BetaJSONOutputFormatParam | None:
865866
if model_request_parameters.output_mode != 'native':
866867
return None
867868
output_object = model_request_parameters.output_object

pydantic_ai_slim/pydantic_ai/profiles/__init__.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,19 @@ class ModelProfile:
2525
supports_tools: bool = True
2626
"""Whether the model supports tools."""
2727
supports_json_schema_output: bool = False
28-
"""Whether the model supports JSON schema output."""
28+
"""Whether the model supports JSON schema output.
29+
30+
This is also referred to as 'native' support for structured output.
31+
This is the preferred way to get structured output from the model when available.
32+
Relates to the `NativeOutput` output type.
33+
"""
2934
supports_json_object_output: bool = False
30-
"""Whether the model supports JSON object output."""
35+
"""Whether the model supports JSON object output.
36+
37+
This is different from `supports_json_schema_output` in that it indicates whether the model can return arbitrary JSON objects,
38+
rather than only JSON objects that conform to a provided JSON schema.
39+
Relates to the `PromptedOutput` output type.
40+
"""
3141
supports_image_output: bool = False
3242
"""Whether the model supports image output."""
3343
default_structured_output_mode: StructuredOutputMode = 'tool'

pydantic_ai_slim/pydantic_ai/profiles/anthropic.py

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,17 @@
11
from __future__ import annotations as _annotations
22

3-
from collections.abc import Callable
43
from dataclasses import dataclass
5-
from typing import Any
6-
7-
from anthropic.lib._parse._transform import SupportedStringFormats
84

95
from .._json_schema import JsonSchema, JsonSchemaTransformer
106
from . import ModelProfile
117

12-
TransformSchemaFunc = Callable[[Any], JsonSchema]
13-
148

159
def _schema_is_lossless(schema: JsonSchema) -> bool: # noqa: C901
16-
"""Return True when `anthropic.transform_schema` won't need to drop constraints."""
10+
"""Return True when `anthropic.transform_schema` won't need to drop constraints.
11+
12+
Checks are performed based on https://docs.claude.com/en/docs/build-with-claude/structured-outputs#how-sdk-transformation-works
13+
"""
14+
from anthropic.lib._parse._transform import SupportedStringFormats
1715

1816
def _walk(node: JsonSchema) -> bool: # noqa: C901
1917
if not isinstance(node, dict):
@@ -39,6 +37,8 @@ def _walk(node: JsonSchema) -> bool: # noqa: C901
3937
node.pop('description', None)
4038
node.pop('title', None)
4139

40+
# every sub-schema in the list must itself be lossless -> `all(_walk(item) for item in any_of)`
41+
# the wrapper object must not have any other unsupported fields -> `and not node`
4242
if isinstance(any_of, list):
4343
return all(_walk(item) for item in any_of) and not node # pyright: ignore[reportUnknownVariableType, reportUnknownArgumentType]
4444
if isinstance(one_of, list):
@@ -104,8 +104,12 @@ def transform(self, schema: JsonSchema) -> JsonSchema:
104104

105105
def anthropic_model_profile(model_name: str) -> ModelProfile | None:
106106
"""Get the model profile for an Anthropic model."""
107+
models_that_support_json_schema_output = ('claude-sonnet-4-5', 'claude-opus-4-1')
108+
# Anthropic introduced support for both structured outputs and strict tool use in these models:
109+
# https://docs.claude.com/en/docs/build-with-claude/structured-outputs#example-usage
110+
supports_json_schema_output = model_name.startswith(models_that_support_json_schema_output)
107111
return ModelProfile(
108112
thinking_tags=('<thinking>', '</thinking>'),
109-
supports_json_schema_output=True,
113+
supports_json_schema_output=supports_json_schema_output,
110114
json_schema_transformer=AnthropicJsonSchemaTransformer,
111115
)

tests/CLAUDE.md

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
# Testing conventions
2+
3+
## general rules
4+
5+
- prefer using `snapshot()` instead of line-by-line assertions
6+
- unless the snapshot is too big and you only need to check specific values
7+
8+
### about static typing
9+
10+
- other codebases don't use types in their test files
11+
- but this codebase is fully typed with static types
12+
- proper types are required and the pre-commit hook strictly checks for types and won't allow commits with type errors
13+
- so you're required to use proper types in test files as well
14+
- refer to `tests/models/anthropic/conftest.py` for examples of typing in test files
15+
16+
## for testing filepaths
17+
18+
- define your function with a parameter `tmp_path: Path`
19+
20+
## examples
21+
22+
### inline vs snapshot
23+
```python
24+
completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
25+
assert 'tools' in completion_kwargs
26+
tools = completion_kwargs['tools']
27+
# By default, tools should be strict-compatible
28+
assert any(tool.get('strict') is True for tool in tools)
29+
# Should include structured-outputs beta
30+
assert 'structured-outputs-2025-11-13' in completion_kwargs.get('betas', [])
31+
```
32+
33+
can be simplified to
34+
35+
```python
36+
completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
37+
tools = completion_kwargs['tools']
38+
betas = completion_kwargs['betas']
39+
assert tools == snapshot()
40+
assert betas == snapshot()
41+
```
42+
43+
- it's preferable to use the snapshot, run the test and check what comes out
44+
- if the snapshot is too large in comparison with the equivalent inline assertions, it's ok to keep the inline assertions
45+
- confirm with the user what they prefer in cases that don't have a clear preference

tests/models/anthropic/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
"""Tests for Anthropic models."""
Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
interactions:
2+
- request:
3+
headers:
4+
accept:
5+
- application/json
6+
accept-encoding:
7+
- gzip, deflate
8+
connection:
9+
- keep-alive
10+
content-length:
11+
- '422'
12+
content-type:
13+
- application/json
14+
host:
15+
- api.anthropic.com
16+
method: POST
17+
parsed_body:
18+
max_tokens: 4096
19+
messages:
20+
- content:
21+
- text: What is the capital of France?
22+
type: text
23+
role: user
24+
model: claude-sonnet-4-0
25+
stream: false
26+
tool_choice:
27+
type: any
28+
tools:
29+
- description: A city and its country.
30+
input_schema:
31+
additionalProperties: false
32+
properties:
33+
city:
34+
type: string
35+
country:
36+
type: string
37+
required:
38+
- city
39+
- country
40+
type: object
41+
name: final_result
42+
uri: https://api.anthropic.com/v1/messages?beta=true
43+
response:
44+
headers:
45+
connection:
46+
- keep-alive
47+
content-length:
48+
- '503'
49+
content-type:
50+
- application/json
51+
retry-after:
52+
- '59'
53+
strict-transport-security:
54+
- max-age=31536000; includeSubDomains; preload
55+
transfer-encoding:
56+
- chunked
57+
parsed_body:
58+
content:
59+
- id: toolu_011CgnivckJbK9QLLe9LN1Rt
60+
input:
61+
city: Paris
62+
country: France
63+
name: final_result
64+
type: tool_use
65+
id: msg_01QgYbVnhrJe61wKzuPNxcsQ
66+
model: claude-sonnet-4-20250514
67+
role: assistant
68+
stop_reason: tool_use
69+
stop_sequence: null
70+
type: message
71+
usage:
72+
cache_creation:
73+
ephemeral_1h_input_tokens: 0
74+
ephemeral_5m_input_tokens: 0
75+
cache_creation_input_tokens: 0
76+
cache_read_input_tokens: 0
77+
input_tokens: 402
78+
output_tokens: 55
79+
service_tier: standard
80+
status:
81+
code: 200
82+
message: OK
83+
version: 1
Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
interactions:
2+
- request:
3+
headers:
4+
accept:
5+
- application/json
6+
accept-encoding:
7+
- gzip, deflate
8+
connection:
9+
- keep-alive
10+
content-length:
11+
- '436'
12+
content-type:
13+
- application/json
14+
host:
15+
- api.anthropic.com
16+
method: POST
17+
parsed_body:
18+
max_tokens: 4096
19+
messages:
20+
- content:
21+
- text: What is the capital of France?
22+
type: text
23+
role: user
24+
model: claude-sonnet-4-5
25+
stream: false
26+
tool_choice:
27+
type: any
28+
tools:
29+
- description: A city and its country.
30+
input_schema:
31+
additionalProperties: false
32+
properties:
33+
city:
34+
type: string
35+
country:
36+
type: string
37+
required:
38+
- city
39+
- country
40+
type: object
41+
name: final_result
42+
strict: true
43+
uri: https://api.anthropic.com/v1/messages?beta=true
44+
response:
45+
headers:
46+
connection:
47+
- keep-alive
48+
content-length:
49+
- '505'
50+
content-type:
51+
- application/json
52+
retry-after:
53+
- '1'
54+
strict-transport-security:
55+
- max-age=31536000; includeSubDomains; preload
56+
transfer-encoding:
57+
- chunked
58+
parsed_body:
59+
content:
60+
- id: toolu_01M1wKLEV2vS9t2oy4RihDKo
61+
input:
62+
city: Paris
63+
country: France
64+
name: final_result
65+
type: tool_use
66+
id: msg_01AW9scKTcw7CLGFCFrzfGNw
67+
model: claude-sonnet-4-5-20250929
68+
role: assistant
69+
stop_reason: tool_use
70+
stop_sequence: null
71+
type: message
72+
usage:
73+
cache_creation:
74+
ephemeral_1h_input_tokens: 0
75+
ephemeral_5m_input_tokens: 0
76+
cache_creation_input_tokens: 0
77+
cache_read_input_tokens: 0
78+
input_tokens: 675
79+
output_tokens: 55
80+
service_tier: standard
81+
status:
82+
code: 200
83+
message: OK
84+
version: 1

0 commit comments

Comments
 (0)