Skip to content

Commit a9bb84a

Browse files
committed
List imports from other example files in their own section
1 parent bf0eb18 commit a9bb84a

File tree

7 files changed: +45 additions, −16 deletions

docs/evals.md

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -60,6 +60,7 @@ from dataclasses import dataclass
6060

6161
from pydantic_evals.evaluators import Evaluator, EvaluatorContext
6262
from pydantic_evals.evaluators.common import IsInstance
63+
6364
from simple_eval_dataset import dataset
6465

6566
dataset.add_evaluator(IsInstance(type_name='str')) # (1)!
@@ -618,10 +619,11 @@ You can also write datasets as JSON files:
618619
```python {title="generate_dataset_example_json.py" requires="generate_dataset_example.py"}
619620
from pathlib import Path
620621

621-
from generate_dataset_example import AnswerOutput, MetadataType, QuestionInputs
622622
from pydantic_evals import Dataset
623623
from pydantic_evals.generation import generate_dataset
624624

625+
from generate_dataset_example import AnswerOutput, MetadataType, QuestionInputs
626+
625627

626628
async def main():
627629
dataset = await generate_dataset( # (1)!

docs/output.md

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -314,6 +314,7 @@ To use this mode, you can wrap the output type(s) in the [`NativeOutput`][pydant
314314

315315
```python {title="native_output.py" requires="tool_output.py"}
316316
from pydantic_ai import Agent, NativeOutput
317+
317318
from tool_output import Fruit, Vehicle
318319

319320
agent = Agent(
@@ -347,6 +348,7 @@ To use this mode, you can wrap the output type(s) in the [`PromptedOutput`][pyda
347348
from pydantic import BaseModel
348349

349350
from pydantic_ai import Agent, PromptedOutput
351+
350352
from tool_output import Vehicle
351353

352354

docs/retries.md

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -258,6 +258,7 @@ The retry transports work with any provider that accepts a custom HTTP client:
258258
from pydantic_ai import Agent
259259
from pydantic_ai.models.openai import OpenAIChatModel
260260
from pydantic_ai.providers.openai import OpenAIProvider
261+
261262
from smart_retry_example import create_retrying_client
262263

263264
client = create_retrying_client()
@@ -271,6 +272,7 @@ agent = Agent(model)
271272
from pydantic_ai import Agent
272273
from pydantic_ai.models.anthropic import AnthropicModel
273274
from pydantic_ai.providers.anthropic import AnthropicProvider
275+
274276
from smart_retry_example import create_retrying_client
275277

276278
client = create_retrying_client()
@@ -284,6 +286,7 @@ agent = Agent(model)
284286
from pydantic_ai import Agent
285287
from pydantic_ai.models.openai import OpenAIChatModel
286288
from pydantic_ai.providers.openai import OpenAIProvider
289+
287290
from smart_retry_example import create_retrying_client
288291

289292
client = create_retrying_client()
@@ -320,6 +323,7 @@ The retry transports will re-raise the last exception if all retry attempts fail
320323
from pydantic_ai import Agent
321324
from pydantic_ai.models.openai import OpenAIChatModel
322325
from pydantic_ai.providers.openai import OpenAIProvider
326+
323327
from smart_retry_example import create_retrying_client
324328

325329
client = create_retrying_client()

docs/testing.md

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -253,6 +253,7 @@ Here's an example of a fixture that overrides the model with `TestModel`:
253253
import pytest
254254

255255
from pydantic_ai.models.test import TestModel
256+
256257
from weather_app import weather_agent
257258

258259

docs/toolsets.md

Lines changed: 19 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -124,11 +124,12 @@ Toolsets can be composed to dynamically filter which tools are available, modify
124124
[`CombinedToolset`][pydantic_ai.toolsets.CombinedToolset] takes a list of toolsets and lets them be used as one.
125125

126126
```python {title="combined_toolset.py" requires="function_toolset.py"}
127-
from function_toolset import datetime_toolset, weather_toolset
128127
from pydantic_ai import Agent
129128
from pydantic_ai.models.test import TestModel
130129
from pydantic_ai.toolsets import CombinedToolset
131130

131+
from function_toolset import datetime_toolset, weather_toolset
132+
132133
combined_toolset = CombinedToolset([weather_toolset, datetime_toolset])
133134

134135
test_model = TestModel() # (1)!
@@ -149,10 +150,11 @@ _(This example is complete, it can be run "as is")_
149150
To easily chain different modifications, you can also call [`filtered()`][pydantic_ai.toolsets.AbstractToolset.filtered] on any toolset instead of directly constructing a `FilteredToolset`.
150151

151152
```python {title="filtered_toolset.py" requires="function_toolset.py,combined_toolset.py"}
152-
from combined_toolset import combined_toolset
153153
from pydantic_ai import Agent
154154
from pydantic_ai.models.test import TestModel
155155

156+
from combined_toolset import combined_toolset
157+
156158
filtered_toolset = combined_toolset.filtered(lambda ctx, tool_def: 'fahrenheit' not in tool_def.name)
157159

158160
test_model = TestModel() # (1)!
@@ -173,11 +175,12 @@ _(This example is complete, it can be run "as is")_
173175
To easily chain different modifications, you can also call [`prefixed()`][pydantic_ai.toolsets.AbstractToolset.prefixed] on any toolset instead of directly constructing a `PrefixedToolset`.
174176

175177
```python {title="combined_toolset.py" requires="function_toolset.py"}
176-
from function_toolset import datetime_toolset, weather_toolset
177178
from pydantic_ai import Agent
178179
from pydantic_ai.models.test import TestModel
179180
from pydantic_ai.toolsets import CombinedToolset
180181

182+
from function_toolset import datetime_toolset, weather_toolset
183+
181184
combined_toolset = CombinedToolset(
182185
[
183186
weather_toolset.prefixed('weather'),
@@ -210,10 +213,11 @@ _(This example is complete, it can be run "as is")_
210213
To easily chain different modifications, you can also call [`renamed()`][pydantic_ai.toolsets.AbstractToolset.renamed] on any toolset instead of directly constructing a `RenamedToolset`.
211214

212215
```python {title="renamed_toolset.py" requires="function_toolset.py,combined_toolset.py"}
213-
from combined_toolset import combined_toolset
214216
from pydantic_ai import Agent
215217
from pydantic_ai.models.test import TestModel
216218

219+
from combined_toolset import combined_toolset
220+
217221
renamed_toolset = combined_toolset.renamed(
218222
{
219223
'current_time': 'datetime_now',
@@ -250,6 +254,7 @@ from dataclasses import replace
250254

251255
from pydantic_ai import Agent, RunContext, ToolDefinition
252256
from pydantic_ai.models.test import TestModel
257+
253258
from renamed_toolset import renamed_toolset
254259

255260
descriptions = {
@@ -330,10 +335,11 @@ To easily chain different modifications, you can also call [`approval_required()
330335
See the [Human-in-the-Loop Tool Approval](tools.md#human-in-the-loop-tool-approval) documentation for more information on how to handle agent runs that call tools that require approval and how to pass in the results.
331336

332337
```python {title="approval_required_toolset.py" requires="function_toolset.py,combined_toolset.py,renamed_toolset.py,prepared_toolset.py"}
333-
from prepared_toolset import prepared_toolset
334338
from pydantic_ai import Agent, DeferredToolRequests, DeferredToolResults
335339
from pydantic_ai.models.test import TestModel
336340

341+
from prepared_toolset import prepared_toolset
342+
337343
approval_required_toolset = prepared_toolset.approval_required(lambda ctx, tool_def, tool_args: tool_def.name.startswith('temperature'))
338344

339345
test_model = TestModel(call_tools=['temperature_celsius', 'temperature_fahrenheit']) # (1)!
@@ -391,11 +397,12 @@ import asyncio
391397

392398
from typing_extensions import Any
393399

394-
from prepared_toolset import prepared_toolset
395400
from pydantic_ai import Agent, RunContext
396401
from pydantic_ai.models.test import TestModel
397402
from pydantic_ai.toolsets import ToolsetTool, WrapperToolset
398403

404+
from prepared_toolset import prepared_toolset
405+
399406
LOG = []
400407

401408
class LoggingToolset(WrapperToolset):
@@ -483,11 +490,12 @@ print(repr(result.output))
483490
Next, let's define a function that represents a hypothetical "run agent" API endpoint that can be called by the frontend and takes a list of messages to send to the model, a list of frontend tool definitions, and optional deferred tool results. This is where `ExternalToolset`, `DeferredToolRequests`, and `DeferredToolResults` come in:
484491

485492
```python {title="deferred_toolset_api.py" requires="deferred_toolset_agent.py"}
486-
from deferred_toolset_agent import PersonalizedGreeting, agent
487493
from pydantic_ai import DeferredToolRequests, DeferredToolResults, ToolDefinition
488494
from pydantic_ai.messages import ModelMessage
489495
from pydantic_ai.toolsets import ExternalToolset
490496

497+
from deferred_toolset_agent import PersonalizedGreeting, agent
498+
491499

492500
def run_agent(
493501
messages: list[ModelMessage] = [],
@@ -511,7 +519,6 @@ def run_agent(
511519
Now, imagine that the code below is implemented on the frontend, and `run_agent` stands in for an API call to the backend that runs the agent. This is where we actually execute the deferred tool calls and start a new run with the new result included:
512520

513521
```python {title="deferred_tools.py" requires="deferred_toolset_agent.py,deferred_toolset_api.py"}
514-
from deferred_toolset_api import run_agent
515522
from pydantic_ai import (
516523
DeferredToolRequests,
517524
DeferredToolResults,
@@ -520,6 +527,8 @@ from pydantic_ai import (
520527
)
521528
from pydantic_ai.messages import ModelMessage, ModelRequest, UserPromptPart
522529

530+
from deferred_toolset_api import run_agent
531+
523532
frontend_tool_definitions = [
524533
ToolDefinition(
525534
name='get_preferred_language',
@@ -592,10 +601,11 @@ By default, the function will be called again ahead of each agent run step. If y
592601
from dataclasses import dataclass
593602
from typing import Literal
594603

595-
from function_toolset import datetime_toolset, weather_toolset
596604
from pydantic_ai import Agent, RunContext
597605
from pydantic_ai.models.test import TestModel
598606

607+
from function_toolset import datetime_toolset, weather_toolset
608+
599609

600610
@dataclass
601611
class ToggleableDeps:

pydantic_ai_slim/pydantic_ai/output.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -133,6 +133,7 @@ class NativeOutput(Generic[OutputDataT]):
133133
Example:
134134
```python {title="native_output.py" requires="tool_output.py"}
135135
from pydantic_ai import Agent, NativeOutput
136+
136137
from tool_output import Fruit, Vehicle
137138
138139
agent = Agent(
@@ -181,6 +182,7 @@ class PromptedOutput(Generic[OutputDataT]):
181182
from pydantic import BaseModel
182183
183184
from pydantic_ai import Agent, PromptedOutput
185+
184186
from tool_output import Vehicle
185187
186188

tests/test_examples.py

Lines changed: 14 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,7 @@
77
import ssl
88
import sys
99
from collections.abc import AsyncIterator, Iterable, Sequence
10-
from dataclasses import dataclass
10+
from dataclasses import dataclass, field
1111
from inspect import FrameInfo
1212
from io import StringIO
1313
from pathlib import Path
@@ -57,14 +57,18 @@
5757
code_examples: dict[str, CodeExample] = {}
5858

5959

60+
@dataclass
6061
class ExamplesConfig(BaseExamplesConfig):
62+
known_first_party: list[str] = field(default_factory=list)
63+
known_local_folder: list[str] = field(default_factory=list)
64+
6165
def ruff_config(self) -> tuple[str, ...]:
6266
config = super().ruff_config()
63-
return (
64-
*config,
65-
'--config',
66-
'lint.isort.known-first-party = ["pydantic_ai", "pydantic_evals", "pydantic_graph"]',
67-
)
67+
if self.known_first_party:
68+
config = (*config, '--config', f'lint.isort.known-first-party = {self.known_first_party}')
69+
if self.known_local_folder:
70+
config = (*config, '--config', f'lint.isort.known-local-folder = {self.known_local_folder}')
71+
return config
6872

6973

7074
def find_filter_examples() -> Iterable[ParameterSet]:
@@ -179,8 +183,10 @@ def print(self, *args: Any, **kwargs: Any) -> None:
179183
if opt_test.startswith('skip') and opt_lint.startswith('skip'):
180184
pytest.skip('both running code and lint skipped')
181185

186+
known_local_folder: list[str] = []
182187
if requires:
183188
for req in requires.split(','):
189+
known_local_folder.append(Path(req).stem)
184190
if ex := code_examples.get(req):
185191
(tmp_path_cwd / req).write_text(ex.source)
186192
else: # pragma: no cover
@@ -205,6 +211,8 @@ def print(self, *args: Any, **kwargs: Any) -> None:
205211
isort=True,
206212
upgrade=True,
207213
quotes='single',
214+
known_first_party=['pydantic_ai', 'pydantic_evals', 'pydantic_graph'],
215+
known_local_folder=known_local_folder,
208216
)
209217
eval_example.print_callback = print_callback
210218
eval_example.include_print = custom_include_print

0 commit comments

Comments (0)