|
4 | 4 | from collections.abc import AsyncIterator, Awaitable, Callable
|
5 | 5 | from dataclasses import dataclass, field
|
6 | 6 | from datetime import datetime
|
7 |
| -from typing import Generic, TypeVar, cast |
| 7 | +from typing import Generic, Union, cast |
8 | 8 |
|
9 | 9 | import logfire_api
|
| 10 | +from typing_extensions import TypeVar |
10 | 11 |
|
11 | 12 | from . import _result, _utils, exceptions, messages as _messages, models
|
12 | 13 | from .settings import UsageLimits
|
13 | 14 | from .tools import AgentDeps, RunContext
|
14 | 15 |
|
15 | 16 | __all__ = (
|
16 | 17 | 'ResultData',
|
| 18 | + 'ResultValidatorFunc', |
17 | 19 | 'Usage',
|
18 | 20 | 'RunResult',
|
19 | 21 | 'StreamedRunResult',
|
20 | 22 | )
|
21 | 23 |
|
22 | 24 |
|
# NOTE: `TypeVar` here comes from `typing_extensions` (see module imports), which
# supports the `default=` parameter (PEP 696) on Python versions before 3.13.
ResultData = TypeVar('ResultData', default=str)
"""Type variable for the result data of a run."""
|
25 | 27 |
|
ResultValidatorFunc = Union[
    Callable[[RunContext[AgentDeps], ResultData], ResultData],
    Callable[[RunContext[AgentDeps], ResultData], Awaitable[ResultData]],
    Callable[[ResultData], ResultData],
    Callable[[ResultData], Awaitable[ResultData]],
]
"""
A function that always takes `ResultData` and returns `ResultData` and:

* may or may not take [`RunContext`][pydantic_ai.tools.RunContext] as a first argument
* may or may not be async

Usage: `ResultValidatorFunc[AgentDeps, ResultData]`.
"""
| 42 | + |
# Module-level Logfire instance used for instrumentation; `otel_scope`
# identifies this package ('pydantic-ai') in emitted telemetry.
_logfire = logfire_api.Logfire(otel_scope='pydantic-ai')
|
27 | 44 |
|
28 | 45 |
|
29 | 46 | @dataclass
|
30 | 47 | class Usage:
|
31 |
| - """LLM usage associated to a request or run. |
| 48 | + """LLM usage associated with a request or run. |
32 | 49 |
|
33 | 50 | Responsibility for calculating usage is on the model; PydanticAI simply sums the usage information across requests.
|
34 | 51 |
|
35 | 52 | You'll need to look up the documentation of the model you're using to convert usage to monetary costs.
|
36 | 53 | """
|
37 | 54 |
|
38 | 55 | requests: int = 0
|
39 |
| - """Number of requests made.""" |
| 56 | + """Number of requests made to the LLM API.""" |
40 | 57 | request_tokens: int | None = None
|
41 | 58 | """Tokens used in processing requests."""
|
42 | 59 | response_tokens: int | None = None
|
|
0 commit comments