Commit cf55a71

Use latest OpenAI, Google, Anthropic models in all examples (#3278)
1 parent c4c9e77 · commit cf55a71

90 files changed: +354 / -354 lines changed

CLAUDE.md

Lines changed: 3 additions & 3 deletions

@@ -28,7 +28,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
 
 **Model Integration (`pydantic_ai_slim/pydantic_ai/models/`)**
 - Unified interface across providers: OpenAI, Anthropic, Google, Groq, Cohere, Mistral, Bedrock, HuggingFace
-- Model strings: `"openai:gpt-4o"`, `"anthropic:claude-3-5-sonnet"`, `"google:gemini-1.5-pro"`
+- Model strings: `"openai:gpt-5"`, `"anthropic:claude-sonnet-4-5"`, `"google:gemini-2.5-pro"`
 - `ModelRequestParameters` for configuration, `StreamedResponse` for streaming
 
 **Graph-based Execution (`pydantic_graph/` + `_agent_graph.py`)**

@@ -55,7 +55,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
 class MyDeps:
     database: DatabaseConn
 
-agent = Agent('openai:gpt-4o', deps_type=MyDeps)
+agent = Agent('openai:gpt-5', deps_type=MyDeps)
 
 @agent.tool
 async def get_data(ctx: RunContext[MyDeps]) -> str:

@@ -69,7 +69,7 @@ class OutputModel(BaseModel):
     confidence: float
 
 agent: Agent[MyDeps, OutputModel] = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=MyDeps,
     output_type=OutputModel
 )

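Taken together, these CLAUDE.md hunks describe the pattern the examples follow: a provider-prefixed model string, `deps_type` for dependency injection, and `output_type` for structured results. Below is a minimal sketch of that pattern with the updated model string; `DatabaseConn`, its `fetch_note` method, and the `answer` field are hypothetical stand-ins added for illustration, not part of the diff.

```python
from dataclasses import dataclass

from pydantic import BaseModel

from pydantic_ai import Agent, RunContext


@dataclass
class DatabaseConn:
    """Hypothetical stand-in for a real database connection."""

    def fetch_note(self) -> str:
        return 'Customer prefers short answers.'


@dataclass
class MyDeps:
    database: DatabaseConn


class OutputModel(BaseModel):
    answer: str  # assumed field; only `confidence` appears in the diff
    confidence: float


# Model string in the updated "<provider>:<model>" form
agent: Agent[MyDeps, OutputModel] = Agent(
    'openai:gpt-5',
    deps_type=MyDeps,
    output_type=OutputModel,
)


@agent.tool
async def get_data(ctx: RunContext[MyDeps]) -> str:
    # Dependencies are read from the run context inside tools.
    return ctx.deps.database.fetch_note()


result = agent.run_sync('Answer briefly: what is 2 + 2?', deps=MyDeps(DatabaseConn()))
print(result.output)
```
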
clai/README.md

Lines changed: 1 addition & 1 deletion

@@ -70,7 +70,7 @@ positional arguments:
 options:
   -h, --help            show this help message and exit
   -m [MODEL], --model [MODEL]
-                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
+                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-5" or "anthropic:claude-sonnet-4-5". Defaults to "openai:gpt-5".
   -a AGENT, --agent AGENT
                         Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
   -l, --list-models     List all available models and exit

docs/a2a.md

Lines changed: 2 additions & 2 deletions

@@ -10,7 +10,7 @@ We also built a convenience method that expose Pydantic AI agents as A2A servers
 ```py {title="agent_to_a2a.py" hl_lines="4"}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4.1', instructions='Be fun!')
+agent = Agent('openai:gpt-5', instructions='Be fun!')
 app = agent.to_a2a()
 ```
 

@@ -104,7 +104,7 @@ To expose a Pydantic AI agent as an A2A server, you can use the `to_a2a` method:
 ```python {title="agent_to_a2a.py"}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4.1', instructions='Be fun!')
+agent = Agent('openai:gpt-5', instructions='Be fun!')
 app = agent.to_a2a()
 ```
 

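Both a2a.md hunks update the same `to_a2a()` snippet. As a rough sketch only: one way the updated snippet might be served locally, assuming the returned app is ASGI-compatible and that uvicorn is available (neither is stated in this diff).

```python
import uvicorn

from pydantic_ai import Agent

agent = Agent('openai:gpt-5', instructions='Be fun!')
app = agent.to_a2a()  # expose the agent as an A2A server

if __name__ == '__main__':
    # Assumption: any ASGI server works; uvicorn is one common choice.
    uvicorn.run(app, host='127.0.0.1', port=8000)
```
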
docs/ag-ui.md

Lines changed: 5 additions & 5 deletions

@@ -55,7 +55,7 @@ from pydantic import ValidationError
 from pydantic_ai import Agent
 from pydantic_ai.ag_ui import SSE_CONTENT_TYPE, run_ag_ui
 
-agent = Agent('openai:gpt-4.1', instructions='Be fun!')
+agent = Agent('openai:gpt-5', instructions='Be fun!')
 
 app = FastAPI()
 

@@ -97,7 +97,7 @@ from starlette.responses import Response
 from pydantic_ai import Agent
 from pydantic_ai.ag_ui import handle_ag_ui_request
 
-agent = Agent('openai:gpt-4.1', instructions='Be fun!')
+agent = Agent('openai:gpt-5', instructions='Be fun!')
 
 app = FastAPI()
 

@@ -121,7 +121,7 @@ This example uses [`Agent.to_ag_ui()`][pydantic_ai.agent.AbstractAgent.to_ag_ui]
 ```py {title="agent_to_ag_ui.py" hl_lines="4"}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4.1', instructions='Be fun!')
+agent = Agent('openai:gpt-5', instructions='Be fun!')
 app = agent.to_ag_ui()
 ```
 

@@ -184,7 +184,7 @@ class DocumentState(BaseModel):
 
 
 agent = Agent(
-    'openai:gpt-4.1',
+    'openai:gpt-5',
     instructions='Be fun!',
     deps_type=StateDeps[DocumentState],
 )

@@ -224,7 +224,7 @@ class DocumentState(BaseModel):
 
 
 agent = Agent(
-    'openai:gpt-4.1',
+    'openai:gpt-5',
     instructions='Be fun!',
     deps_type=StateDeps[DocumentState],
 )

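The last two ag-ui.md hunks show an agent whose `deps_type` is `StateDeps[DocumentState]`. A tentative sketch of how that agent might be wired up follows; the `StateDeps` import path, the `document` field, and passing `deps=` to `to_ag_ui()` are assumptions based on the surrounding docs rather than anything shown in this diff.

```python
from pydantic import BaseModel

from pydantic_ai import Agent
from pydantic_ai.ag_ui import StateDeps  # assumed import path


class DocumentState(BaseModel):
    document: str = ''  # assumed field; the class body is not shown in the diff


agent = Agent(
    'openai:gpt-5',
    instructions='Be fun!',
    deps_type=StateDeps[DocumentState],
)

# Assumption: to_ag_ui() accepts initial dependencies this way.
app = agent.to_ag_ui(deps=StateDeps(DocumentState()))
```
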
docs/agents.md

Lines changed: 20 additions & 20 deletions

@@ -24,7 +24,7 @@ Here's a toy example of an agent that simulates a roulette wheel:
 from pydantic_ai import Agent, RunContext
 
 roulette_agent = Agent(  # (1)!
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=int,
     output_type=bool,
     system_prompt=(

@@ -74,7 +74,7 @@ Here's a simple example demonstrating the first four:
 ```python {title="run_agent.py"}
 from pydantic_ai import Agent, AgentRunResultEvent, AgentStreamEvent
 
-agent = Agent('openai:gpt-4o')
+agent = Agent('openai:gpt-5')
 
 result_sync = agent.run_sync('What is the capital of Italy?')
 print(result_sync.output)

@@ -148,7 +148,7 @@ from pydantic_ai import (
 )
 
 weather_agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     system_prompt='Providing a weather forecast at the locations the user provides.',
 )
 

@@ -290,7 +290,7 @@ Here's an example of using `async for` with `iter` to record each node the agent
 ```python {title="agent_iter_async_for.py"}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4o')
+agent = Agent('openai:gpt-5')
 
 
 async def main():

@@ -324,7 +324,7 @@ async def main():
             model_response=ModelResponse(
                 parts=[TextPart(content='The capital of France is Paris.')],
                 usage=RequestUsage(input_tokens=56, output_tokens=7),
-                model_name='gpt-4o',
+                model_name='gpt-5',
                 timestamp=datetime.datetime(...),
             )
         ),

@@ -348,7 +348,7 @@ You can also drive the iteration manually by passing the node you want to run ne
 from pydantic_ai import Agent
 from pydantic_graph import End
 
-agent = Agent('openai:gpt-4o')
+agent = Agent('openai:gpt-5')
 
 
 async def main():

@@ -386,7 +386,7 @@ async def main():
             model_response=ModelResponse(
                 parts=[TextPart(content='The capital of France is Paris.')],
                 usage=RequestUsage(input_tokens=56, output_tokens=7),
-                model_name='gpt-4o',
+                model_name='gpt-5',
                 timestamp=datetime.datetime(...),
             )
         ),

@@ -443,7 +443,7 @@ class WeatherService:
 
 
 weather_agent = Agent[WeatherService, str](
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=WeatherService,
     output_type=str,  # We'll produce a final answer as plain text
     system_prompt='Providing a weather forecast at the locations the user provides.',

@@ -572,7 +572,7 @@ Consider the following example, where we limit the number of response tokens:
 ```py
 from pydantic_ai import Agent, UsageLimitExceeded, UsageLimits
 
-agent = Agent('anthropic:claude-3-5-sonnet-latest')
+agent = Agent('anthropic:claude-sonnet-4-5')
 
 result_sync = agent.run_sync(
     'What is the capital of Italy? Answer with just the city.',

@@ -610,7 +610,7 @@ class NeverOutputType(TypedDict):
 
 
 agent = Agent(
-    'anthropic:claude-3-5-sonnet-latest',
+    'anthropic:claude-sonnet-4-5',
     retries=3,
     output_type=NeverOutputType,
     system_prompt='Any time you get a response, call the `infinite_retry_tool` to produce another response.',

@@ -643,7 +643,7 @@ from pydantic_ai import Agent
 from pydantic_ai.exceptions import UsageLimitExceeded
 from pydantic_ai.usage import UsageLimits
 
-agent = Agent('anthropic:claude-3-5-sonnet-latest')
+agent = Agent('anthropic:claude-sonnet-4-5')
 
 @agent.tool_plain
 def do_work() -> str:

@@ -682,7 +682,7 @@ from pydantic_ai.models.openai import OpenAIChatModel
 
 # 1. Model-level defaults
 model = OpenAIChatModel(
-    'gpt-4o',
+    'gpt-5',
     settings=ModelSettings(temperature=0.8, max_tokens=500)  # Base defaults
 )
 

@@ -714,7 +714,7 @@ For example:
 from pydantic_ai import Agent, UnexpectedModelBehavior
 from pydantic_ai.models.google import GoogleModelSettings
 
-agent = Agent('google-gla:gemini-1.5-flash')
+agent = Agent('google-gla:gemini-2.5-flash')
 
 try:
     result = agent.run_sync(

@@ -752,7 +752,7 @@ Here's an example of a conversation comprised of multiple runs:
 ```python {title="conversation_example.py" hl_lines="13"}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4o')
+agent = Agent('openai:gpt-5')
 
 # First run
 result1 = agent.run_sync('Who was Albert Einstein?')

@@ -860,7 +860,7 @@ from datetime import date
 from pydantic_ai import Agent, RunContext
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=str,  # (1)!
     system_prompt="Use the customer's name while replying to them.",  # (2)!
 )

@@ -916,7 +916,7 @@ from datetime import date
 from pydantic_ai import Agent, RunContext
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=str,  # (1)!
     instructions="Use the customer's name while replying to them.",  # (2)!
 )

@@ -973,7 +973,7 @@ class ChatResult(BaseModel):
 
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=DatabaseConn,
     output_type=ChatResult,
 )

@@ -1011,7 +1011,7 @@ In these cases, [`capture_run_messages`][pydantic_ai.capture_run_messages] can b
 ```python {title="agent_model_errors.py"}
 from pydantic_ai import Agent, ModelRetry, UnexpectedModelBehavior, capture_run_messages
 
-agent = Agent('openai:gpt-4o')
+agent = Agent('openai:gpt-5')
 
 
 @agent.tool_plain

@@ -1051,7 +1051,7 @@ with capture_run_messages() as messages:  # (2)!
             )
         ],
         usage=RequestUsage(input_tokens=62, output_tokens=4),
-        model_name='gpt-4o',
+        model_name='gpt-5',
         timestamp=datetime.datetime(...),
     ),
     ModelRequest(

@@ -1073,7 +1073,7 @@ with capture_run_messages() as messages:  # (2)!
             )
         ],
         usage=RequestUsage(input_tokens=72, output_tokens=8),
-        model_name='gpt-4o',
+        model_name='gpt-5',
         timestamp=datetime.datetime(...),
     ),
 ]

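One of the agents.md hunks above updates the model-level defaults example (`# 1. Model-level defaults`). As a sketch of how those layered settings might combine at run time, assuming agent-level and run-level `model_settings` behave as the full docs page describes (only the model-level defaults appear in this diff):

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.settings import ModelSettings

# 1. Model-level defaults, as in the updated hunk
model = OpenAIChatModel(
    'gpt-5',
    settings=ModelSettings(temperature=0.8, max_tokens=500),  # Base defaults
)

# 2. Agent-level settings (assumed to override model defaults where they overlap)
agent = Agent(model, model_settings=ModelSettings(temperature=0.5))

# 3. Run-level settings (assumed to take precedence for a single call)
result = agent.run_sync(
    'What is the capital of Italy? Answer with just the city.',
    model_settings=ModelSettings(temperature=0.0),
)
print(result.output)
```
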
docs/api/models/function.md

Lines changed: 1 addition & 1 deletion

@@ -14,7 +14,7 @@ from pydantic_ai import Agent
 from pydantic_ai import ModelMessage, ModelResponse, TextPart
 from pydantic_ai.models.function import FunctionModel, AgentInfo
 
-my_agent = Agent('openai:gpt-4o')
+my_agent = Agent('openai:gpt-5')
 
 
 async def model_function(

docs/api/models/test.md

Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@ Here's a minimal example:
 from pydantic_ai import Agent
 from pydantic_ai.models.test import TestModel
 
-my_agent = Agent('openai:gpt-4o', system_prompt='...')
+my_agent = Agent('openai:gpt-5', system_prompt='...')
 
 
 async def test_my_agent():

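The function.md and test.md hunks both rename the model on agents that are later exercised with test doubles. A brief sketch of the `TestModel` pattern the test.md example builds toward, assuming `Agent.override` is used as on that page (the body of `test_my_agent` is not shown in this diff):

```python
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

my_agent = Agent('openai:gpt-5', system_prompt='...')


async def test_my_agent():
    # Swap the real model for TestModel so the test makes no API calls.
    with my_agent.override(model=TestModel()):
        result = await my_agent.run('Hello')
    assert result.output is not None
```
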
docs/builtin-tools.md

Lines changed: 2 additions & 2 deletions

@@ -59,7 +59,7 @@ With OpenAI, you must use their Responses API to access the web search tool.
 ```py {title="web_search_openai.py"}
 from pydantic_ai import Agent, WebSearchTool
 
-agent = Agent('openai-responses:gpt-4.1', builtin_tools=[WebSearchTool()])
+agent = Agent('openai-responses:gpt-5', builtin_tools=[WebSearchTool()])
 
 result = agent.run_sync('Give me a sentence with the biggest news in AI this week.')
 print(result.output)

@@ -201,7 +201,7 @@ The [`ImageGenerationTool`][pydantic_ai.builtin_tools.ImageGenerationTool] enabl
 
 | Provider | Supported | Notes |
 |----------|-----------|-------|
-| OpenAI Responses | | Full feature support. Only supported by models newer than `gpt-4o`. Metadata about the generated image, like the [`revised_prompt`](https://platform.openai.com/docs/guides/tools-image-generation#revised-prompt) sent to the underlying image model, is available on the [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] that's available via [`ModelResponse.builtin_tool_calls`][pydantic_ai.messages.ModelResponse.builtin_tool_calls]. |
+| OpenAI Responses | | Full feature support. Only supported by models newer than `gpt-5`. Metadata about the generated image, like the [`revised_prompt`](https://platform.openai.com/docs/guides/tools-image-generation#revised-prompt) sent to the underlying image model, is available on the [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] that's available via [`ModelResponse.builtin_tool_calls`][pydantic_ai.messages.ModelResponse.builtin_tool_calls]. |
 | Google | | No parameter support. Only supported by [image generation models](https://ai.google.dev/gemini-api/docs/image-generation) like `gemini-2.5-flash-image`. These models do not support [structured output](output.md) or [function tools](tools.md). These models will always generate images, even if this built-in tool is not explicitly specified. |
 | Anthropic | | |
 | Groq | | |

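The second builtin-tools.md hunk only edits the provider table, so no runnable `ImageGenerationTool` example appears in this diff. Below is a hedged sketch based on the names the table references (`pydantic_ai.builtin_tools.ImageGenerationTool`, `ModelResponse.builtin_tool_calls`); the prompt, the no-argument constructor, and the shape of the returned metadata are assumptions.

```python
from pydantic_ai import Agent
from pydantic_ai.builtin_tools import ImageGenerationTool
from pydantic_ai.messages import ModelResponse

agent = Agent('openai-responses:gpt-5', builtin_tools=[ImageGenerationTool()])

result = agent.run_sync('Generate a simple logo for a robot bakery.')

# Inspect built-in tool metadata (e.g. the revised prompt) on the model responses.
for message in result.all_messages():
    if isinstance(message, ModelResponse):
        print(message.builtin_tool_calls)
```
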
docs/cli.md

Lines changed: 4 additions & 4 deletions

@@ -73,7 +73,7 @@ You can specify a custom agent using the `--agent` flag with a module path and v
 ```python {title="custom_agent.py" test="skip"}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4.1', instructions='You always respond in Italian.')
+agent = Agent('openai:gpt-5', instructions='You always respond in Italian.')
 ```
 
 Then run:

@@ -92,7 +92,7 @@ Additionally, you can directly launch CLI mode from an `Agent` instance using `A
 ```python {title="agent_to_cli_sync.py" test="skip" hl_lines=4}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4.1', instructions='You always respond in Italian.')
+agent = Agent('openai:gpt-5', instructions='You always respond in Italian.')
 agent.to_cli_sync()
 ```
 

@@ -101,7 +101,7 @@ You can also use the async interface with `Agent.to_cli()`:
 ```python {title="agent_to_cli.py" test="skip" hl_lines=6}
 from pydantic_ai import Agent
 
-agent = Agent('openai:gpt-4.1', instructions='You always respond in Italian.')
+agent = Agent('openai:gpt-5', instructions='You always respond in Italian.')
 
 async def main():
     await agent.to_cli()

@@ -123,7 +123,7 @@ from pydantic_ai import (
     UserPromptPart,
 )
 
-agent = Agent('openai:gpt-4.1')
+agent = Agent('openai:gpt-5')
 
 # Create some conversation history
 message_history: list[ModelMessage] = [

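The third cli.md hunk defines an async `main()` but the diff context ends before it is invoked. A minimal sketch of launching it, assuming the standard `asyncio.run` entry point:

```python
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-5', instructions='You always respond in Italian.')


async def main():
    await agent.to_cli()


if __name__ == '__main__':
    asyncio.run(main())
```
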
docs/dependencies.md

Lines changed: 5 additions & 5 deletions

@@ -27,7 +27,7 @@ class MyDeps:  # (1)!
 
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=MyDeps,  # (2)!
 )
 

@@ -68,7 +68,7 @@ class MyDeps:
 
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=MyDeps,
 )
 

@@ -126,7 +126,7 @@ class MyDeps:
 
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=MyDeps,
 )
 

@@ -174,7 +174,7 @@ class MyDeps:
 
 
 agent = Agent(
-    'openai:gpt-4o',
+    'openai:gpt-5',
     deps_type=MyDeps,
 )
 

@@ -251,7 +251,7 @@ class MyDeps:
         return f'Prompt: {response.text}'
 
 
-joke_agent = Agent('openai:gpt-4o', deps_type=MyDeps)
+joke_agent = Agent('openai:gpt-5', deps_type=MyDeps)
 
 
 @joke_agent.system_prompt

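All of the dependencies.md hunks update agents declared with `deps_type=MyDeps`. A small sketch of how such dependencies are supplied and consumed at run time, using a simplified stand-in `MyDeps` (the real class and its fields are not shown in this diff):

```python
from dataclasses import dataclass

from pydantic_ai import Agent, RunContext


@dataclass
class MyDeps:
    api_key: str  # simplified placeholder field


joke_agent = Agent('openai:gpt-5', deps_type=MyDeps)


@joke_agent.system_prompt
async def get_system_prompt(ctx: RunContext[MyDeps]) -> str:
    # Dependencies come from the run context, not module globals.
    return f'Tell a joke. (key ends with {ctx.deps.api_key[-4:]})'


result = joke_agent.run_sync('Tell me a joke.', deps=MyDeps(api_key='secret-key'))
print(result.output)
```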