Skip to content

Commit 8510b87

Browse files
committed
Merge remote-tracking branch 'origin/main' into psl/allow-partial
2 parents 87374d7 + 7ba1037 commit 8510b87

File tree

96 files changed

+386
-380
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

96 files changed

+386
-380
lines changed

.github/workflows/after-ci.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ jobs:
1616
steps:
1717
- uses: astral-sh/setup-uv@v5
1818
with:
19-
enable-cache: true
2019
python-version: "3.12"
2120

2221
- uses: dawidd6/action-download-artifact@v6
@@ -54,8 +53,9 @@ jobs:
5453

5554
- uses: astral-sh/setup-uv@v5
5655
with:
57-
enable-cache: true
5856
python-version: "3.12"
57+
enable-cache: true
58+
cache-suffix: deploy-docs-preview
5959

6060
- uses: dawidd6/action-download-artifact@v6
6161
with:

.github/workflows/ci.yml

Lines changed: 20 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -26,14 +26,15 @@ jobs:
2626
# Typecheck both Python 3.10 and 3.13. We've had issues due to not checking against both.
2727
python-version: ["3.10", "3.13"]
2828
env:
29-
UV_PYTHON: ${{ matrix.python-version }}
3029
PYRIGHT_PYTHON: ${{ matrix.python-version }}
3130
steps:
3231
- uses: actions/checkout@v4
3332

3433
- uses: astral-sh/setup-uv@v5
3534
with:
35+
python-version: ${{ matrix.python-version }}
3636
enable-cache: true
37+
cache-suffix: lint
3738

3839
- name: Install dependencies
3940
run: uv sync --all-extras --all-packages --group lint
@@ -56,6 +57,7 @@ jobs:
5657
- uses: astral-sh/setup-uv@v5
5758
with:
5859
enable-cache: true
60+
cache-suffix: mypy
5961

6062
- name: Install dependencies
6163
run: uv sync --no-dev --group lint
@@ -70,6 +72,7 @@ jobs:
7072
- uses: astral-sh/setup-uv@v5
7173
with:
7274
enable-cache: true
75+
cache-suffix: docs
7376

7477
- run: uv sync --group docs
7578

@@ -107,6 +110,7 @@ jobs:
107110
- uses: astral-sh/setup-uv@v5
108111
with:
109112
enable-cache: true
113+
cache-suffix: live
110114

111115
- uses: pydantic/ollama-action@v3
112116
with:
@@ -151,26 +155,27 @@ jobs:
151155
- name: all-extras
152156
command: "--all-extras"
153157
env:
154-
UV_PYTHON: ${{ matrix.python-version }}
155158
CI: true
156159
COVERAGE_PROCESS_START: ./pyproject.toml
157160
steps:
158161
- uses: actions/checkout@v4
159162

160163
- uses: astral-sh/setup-uv@v5
161164
with:
165+
python-version: ${{ matrix.python-version }}
162166
enable-cache: true
163-
prune-cache: false
167+
cache-suffix: ${{ matrix.install.name }}
164168

165169
- uses: denoland/setup-deno@v2
166170
with:
167171
deno-version: v2.x
168172

169173
- run: mkdir .coverage
170174

171-
- run: uv run mcp-run-python example --deps=numpy
172175
- run: uv sync --only-dev
173176

177+
- run: uv run mcp-run-python example --deps=numpy
178+
174179
- name: cache HuggingFace models
175180
uses: actions/cache@v4
176181
with:
@@ -199,16 +204,16 @@ jobs:
199204
matrix:
200205
python-version: ["3.10", "3.11", "3.12", "3.13"]
201206
env:
202-
UV_PYTHON: ${{ matrix.python-version }}
203207
CI: true
204208
COVERAGE_PROCESS_START: ./pyproject.toml
205209
steps:
206210
- uses: actions/checkout@v4
207211

208212
- uses: astral-sh/setup-uv@v5
209213
with:
214+
python-version: ${{ matrix.python-version }}
210215
enable-cache: true
211-
prune-cache: false
216+
cache-suffix: lowest-versions
212217

213218
- uses: denoland/setup-deno@v2
214219
with:
@@ -218,6 +223,8 @@ jobs:
218223

219224
- run: uv sync --group dev
220225

226+
- run: uv run mcp-run-python example --deps=numpy
227+
221228
- name: cache HuggingFace models
222229
uses: actions/cache@v4
223230
with:
@@ -226,8 +233,6 @@ jobs:
226233
restore-keys: |
227234
hf-${{ runner.os }}-
228235
229-
- run: uv run mcp-run-python example --deps=numpy
230-
231236
- run: unset UV_FROZEN
232237

233238
- run: uv run --all-extras --resolution lowest-direct coverage run -m pytest --durations=100 -n auto --dist=loadgroup
@@ -250,15 +255,15 @@ jobs:
250255
matrix:
251256
python-version: ["3.11", "3.12", "3.13"]
252257
env:
253-
UV_PYTHON: ${{ matrix.python-version }}
254258
CI: true
255259
steps:
256260
- uses: actions/checkout@v4
257261

258262
- uses: astral-sh/setup-uv@v5
259263
with:
264+
python-version: ${{ matrix.python-version }}
260265
enable-cache: true
261-
prune-cache: false
266+
cache-suffix: all-extras
262267

263268
- name: cache HuggingFace models
264269
uses: actions/cache@v4
@@ -288,6 +293,7 @@ jobs:
288293
- uses: astral-sh/setup-uv@v5
289294
with:
290295
enable-cache: true
296+
cache-suffix: dev
291297

292298
- run: uv sync --group dev
293299
- run: uv run coverage combine
@@ -343,6 +349,7 @@ jobs:
343349
- uses: astral-sh/setup-uv@v5
344350
with:
345351
enable-cache: true
352+
cache-suffix: docs-upload
346353

347354
- uses: actions/download-artifact@v4
348355
with:
@@ -384,6 +391,7 @@ jobs:
384391
- uses: astral-sh/setup-uv@v5
385392
with:
386393
enable-cache: true
394+
cache-suffix: deploy-docs-preview
387395

388396
- uses: actions/download-artifact@v4
389397
with:
@@ -431,6 +439,7 @@ jobs:
431439
- uses: astral-sh/setup-uv@v5
432440
with:
433441
enable-cache: true
442+
cache-suffix: release
434443

435444
- run: uv build --all-packages
436445

@@ -464,12 +473,7 @@ jobs:
464473
import os
465474
import tweepy
466475
467-
client = tweepy.Client(
468-
access_token=os.getenv("TWITTER_ACCESS_TOKEN"),
469-
access_token_secret=os.getenv("TWITTER_ACCESS_TOKEN_SECRET"),
470-
consumer_key=os.getenv("TWITTER_CONSUMER_KEY"),
471-
consumer_secret=os.getenv("TWITTER_CONSUMER_SECRET"),
472-
)
476+
client = tweepy.Client(os.getenv("TWITTER_ACCESS_TOKEN"))
473477
version = os.getenv("VERSION").strip('"')
474478
tweet = os.getenv("TWEET").format(version=version)
475479
client.create_tweet(text=tweet)
@@ -479,7 +483,4 @@ jobs:
479483
Pydantic AI version {version} is out! 🎉
480484
481485
https://github.com/pydantic/pydantic-ai/releases/tag/v{version}
482-
TWITTER_CONSUMER_KEY: ${{ secrets.TWITTER_CONSUMER_KEY }}
483-
TWITTER_CONSUMER_SECRET: ${{ secrets.TWITTER_CONSUMER_SECRET }}
484486
TWITTER_ACCESS_TOKEN: ${{ secrets.TWITTER_ACCESS_TOKEN }}
485-
TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}

.github/workflows/claude.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ jobs:
3737
- uses: astral-sh/setup-uv@v5
3838
with:
3939
enable-cache: true
40+
cache-suffix: claude-code
4041

4142
- uses: denoland/setup-deno@v2
4243
with:

.github/workflows/manually-deploy-docs.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ jobs:
1313
- uses: astral-sh/setup-uv@v5
1414
with:
1515
enable-cache: true
16+
cache-suffix: docs
1617

1718
- run: uv sync --group docs
1819

@@ -54,6 +55,7 @@ jobs:
5455
- uses: astral-sh/setup-uv@v5
5556
with:
5657
enable-cache: true
58+
cache-suffix: docs-upload
5759

5860
- uses: actions/download-artifact@v4
5961
with:

CLAUDE.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
2828

2929
**Model Integration (`pydantic_ai_slim/pydantic_ai/models/`)**
3030
- Unified interface across providers: OpenAI, Anthropic, Google, Groq, Cohere, Mistral, Bedrock, HuggingFace
31-
- Model strings: `"openai:gpt-4o"`, `"anthropic:claude-3-5-sonnet"`, `"google:gemini-1.5-pro"`
31+
- Model strings: `"openai:gpt-5"`, `"anthropic:claude-sonnet-4-5"`, `"google:gemini-2.5-pro"`
3232
- `ModelRequestParameters` for configuration, `StreamedResponse` for streaming
3333

3434
**Graph-based Execution (`pydantic_graph/` + `_agent_graph.py`)**
@@ -55,7 +55,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
5555
class MyDeps:
5656
database: DatabaseConn
5757

58-
agent = Agent('openai:gpt-4o', deps_type=MyDeps)
58+
agent = Agent('openai:gpt-5', deps_type=MyDeps)
5959

6060
@agent.tool
6161
async def get_data(ctx: RunContext[MyDeps]) -> str:
@@ -69,7 +69,7 @@ class OutputModel(BaseModel):
6969
confidence: float
7070

7171
agent: Agent[MyDeps, OutputModel] = Agent(
72-
'openai:gpt-4o',
72+
'openai:gpt-5',
7373
deps_type=MyDeps,
7474
output_type=OutputModel
7575
)

clai/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ positional arguments:
7070
options:
7171
-h, --help show this help message and exit
7272
-m [MODEL], --model [MODEL]
73-
Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
73+
Model to use, in format "<provider>:<model>" e.g. "openai:gpt-5" or "anthropic:claude-sonnet-4-5". Defaults to "openai:gpt-5".
7474
-a AGENT, --agent AGENT
7575
Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
7676
-l, --list-models List all available models and exit

docs/a2a.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ We also built a convenience method that expose Pydantic AI agents as A2A servers
1010
```py {title="agent_to_a2a.py" hl_lines="4"}
1111
from pydantic_ai import Agent
1212

13-
agent = Agent('openai:gpt-4.1', instructions='Be fun!')
13+
agent = Agent('openai:gpt-5', instructions='Be fun!')
1414
app = agent.to_a2a()
1515
```
1616

@@ -104,7 +104,7 @@ To expose a Pydantic AI agent as an A2A server, you can use the `to_a2a` method:
104104
```python {title="agent_to_a2a.py"}
105105
from pydantic_ai import Agent
106106

107-
agent = Agent('openai:gpt-4.1', instructions='Be fun!')
107+
agent = Agent('openai:gpt-5', instructions='Be fun!')
108108
app = agent.to_a2a()
109109
```
110110

docs/ag-ui.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ from pydantic import ValidationError
5555
from pydantic_ai import Agent
5656
from pydantic_ai.ag_ui import SSE_CONTENT_TYPE, run_ag_ui
5757

58-
agent = Agent('openai:gpt-4.1', instructions='Be fun!')
58+
agent = Agent('openai:gpt-5', instructions='Be fun!')
5959

6060
app = FastAPI()
6161

@@ -97,7 +97,7 @@ from starlette.responses import Response
9797
from pydantic_ai import Agent
9898
from pydantic_ai.ag_ui import handle_ag_ui_request
9999

100-
agent = Agent('openai:gpt-4.1', instructions='Be fun!')
100+
agent = Agent('openai:gpt-5', instructions='Be fun!')
101101

102102
app = FastAPI()
103103

@@ -121,7 +121,7 @@ This example uses [`Agent.to_ag_ui()`][pydantic_ai.agent.AbstractAgent.to_ag_ui]
121121
```py {title="agent_to_ag_ui.py" hl_lines="4"}
122122
from pydantic_ai import Agent
123123

124-
agent = Agent('openai:gpt-4.1', instructions='Be fun!')
124+
agent = Agent('openai:gpt-5', instructions='Be fun!')
125125
app = agent.to_ag_ui()
126126
```
127127

@@ -184,7 +184,7 @@ class DocumentState(BaseModel):
184184

185185

186186
agent = Agent(
187-
'openai:gpt-4.1',
187+
'openai:gpt-5',
188188
instructions='Be fun!',
189189
deps_type=StateDeps[DocumentState],
190190
)
@@ -224,7 +224,7 @@ class DocumentState(BaseModel):
224224

225225

226226
agent = Agent(
227-
'openai:gpt-4.1',
227+
'openai:gpt-5',
228228
instructions='Be fun!',
229229
deps_type=StateDeps[DocumentState],
230230
)

0 commit comments

Comments
 (0)