
Commit 0f0c9de

Merge branch 'main' into update-versins
2 parents: bd9856e + c568ee9

7 files changed, 74 additions and 18 deletions

pydantic_ai_slim/pydantic_ai/_agent_graph.py

Lines changed: 12 additions & 5 deletions
@@ -815,14 +815,21 @@ def capture_run_messages() -> Iterator[list[_messages.ModelMessage]]:
     If you call `run`, `run_sync`, or `run_stream` more than once within a single `capture_run_messages` context,
     `messages` will represent the messages exchanged during the first call only.
     """
+    token = None
+    messages: list[_messages.ModelMessage] = []
+
+    # Try to reuse existing message context if available
     try:
-        yield _messages_ctx_var.get().messages
+        messages = _messages_ctx_var.get().messages
     except LookupError:
-        messages: list[_messages.ModelMessage] = []
+        # No existing context, create a new one
         token = _messages_ctx_var.set(_RunMessages(messages))
-        try:
-            yield messages
-        finally:
+
+    try:
+        yield messages
+    finally:
+        # Clean up context if we created it
+        if token is not None:
             _messages_ctx_var.reset(token)
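
The key change is that the `yield` now happens outside the `except LookupError:` handler, so an exception raised by user code inside the `with capture_run_messages():` block is no longer chained to the internal `LookupError` via `__context__`. A minimal sketch of the resulting behaviour, mirroring the test added in tests/test_agent.py below (uses the built-in 'test' model, so no API key is needed):

from pydantic_ai import Agent, capture_run_messages

agent = Agent('test')

try:
    with capture_run_messages():
        agent.run_sync('Hello')
        raise ZeroDivisionError('division by zero')  # user error inside the context
except ZeroDivisionError as exc:
    # previously the internal LookupError could surface here as exc.__context__
    assert exc.__context__ is None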

pydantic_ai_slim/pydantic_ai/agent.py

Lines changed: 10 additions & 4 deletions
@@ -2218,12 +2218,18 @@ def _set_output_tool_return(self, return_content: str) -> list[_messages.ModelMessage]:
         """
         if not self._output_tool_name:
             raise ValueError('Cannot set output tool return content when the return type is `str`.')
-        messages = deepcopy(self._state.message_history)
+
+        messages = self._state.message_history
         last_message = messages[-1]
-        for part in last_message.parts:
+        for idx, part in enumerate(last_message.parts):
             if isinstance(part, _messages.ToolReturnPart) and part.tool_name == self._output_tool_name:
-                part.content = return_content
-                return messages
+                # Only do deepcopy when we have to modify
+                copied_messages = list(messages)
+                copied_last = deepcopy(last_message)
+                copied_last.parts[idx].content = return_content  # type: ignore[misc]
+                copied_messages[-1] = copied_last
+                return copied_messages
+
         raise LookupError(f'No tool call found with tool name {self._output_tool_name!r}.')

     @overload
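
This rewrite replaces an unconditional deepcopy of the full message history with a copy-on-write approach: the history list is shallow-copied and only the final message is deep-copied, and only once a matching tool-return part has been found. A rough standalone sketch of the pattern, using hypothetical Message/Part stand-ins rather than the real pydantic_ai message types:

from copy import deepcopy
from dataclasses import dataclass, field

@dataclass
class Part:  # hypothetical stand-in for a tool-return part
    tool_name: str
    content: str

@dataclass
class Message:  # hypothetical stand-in for a model message
    parts: list[Part] = field(default_factory=list)

def set_tool_return(history: list[Message], tool_name: str, content: str) -> list[Message]:
    last_message = history[-1]
    for idx, part in enumerate(last_message.parts):
        if part.tool_name == tool_name:
            copied_history = list(history)        # shallow copy: earlier messages stay shared
            copied_last = deepcopy(last_message)  # deep copy only the message being modified
            copied_last.parts[idx].content = content
            copied_history[-1] = copied_last
            return copied_history
    raise LookupError(f'No tool call found with tool name {tool_name!r}.')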

pydantic_ai_slim/pydantic_ai/models/__init__.py

Lines changed: 16 additions & 0 deletions
@@ -230,10 +230,16 @@
         'o1-mini-2024-09-12',
         'o1-preview',
         'o1-preview-2024-09-12',
+        'o1-pro',
+        'o1-pro-2025-03-19',
         'o3',
         'o3-2025-04-16',
+        'o3-deep-research',
+        'o3-deep-research-2025-06-26',
         'o3-mini',
         'o3-mini-2025-01-31',
+        'o3-pro',
+        'o3-pro-2025-06-10',
         'openai:chatgpt-4o-latest',
         'openai:codex-mini-latest',
         'openai:gpt-3.5-turbo',
@@ -283,12 +289,22 @@
         'openai:o1-mini-2024-09-12',
         'openai:o1-preview',
         'openai:o1-preview-2024-09-12',
+        'openai:o1-pro',
+        'openai:o1-pro-2025-03-19',
         'openai:o3',
         'openai:o3-2025-04-16',
+        'openai:o3-deep-research',
+        'openai:o3-deep-research-2025-06-26',
         'openai:o3-mini',
         'openai:o3-mini-2025-01-31',
         'openai:o4-mini',
         'openai:o4-mini-2025-04-16',
+        'openai:o4-mini-deep-research',
+        'openai:o4-mini-deep-research-2025-06-26',
+        'openai:o3-pro',
+        'openai:o3-pro-2025-06-10',
+        'openai:computer-use-preview',
+        'openai:computer-use-preview-2025-03-11',
         'test',
     ],
 )
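
The added identifiers extend the known-model-name literal, so the new OpenAI models can be referenced by name when constructing an agent. A small sketch (assumes OpenAI credentials are configured at runtime; type checkers should now accept these names):

from pydantic_ai import Agent

o3_pro_agent = Agent('openai:o3-pro')
deep_research_agent = Agent('openai:o3-deep-research')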

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 2 additions & 2 deletions
@@ -50,7 +50,7 @@

 try:
     from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
-    from openai.types import ChatModel, chat, responses
+    from openai.types import AllModels, chat, responses
     from openai.types.chat import (
         ChatCompletionChunk,
         ChatCompletionContentPartImageParam,
@@ -80,7 +80,7 @@
     'OpenAIModelName',
 )

-OpenAIModelName = Union[str, ChatModel]
+OpenAIModelName = Union[str, AllModels]
 """
 Possible OpenAI model names.
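
Swapping `ChatModel` for `AllModels` widens the `OpenAIModelName` alias to cover names that are presumably only available via the Responses API (such as 'o1-pro' and 'computer-use-preview'), matching the additions to the known-model list above. A hedged sketch of using the widened alias, assuming the Responses-capable model class:

from pydantic_ai.models.openai import OpenAIResponsesModel

# 'o1-pro' is not part of ChatModel but should type-check against the widened
# OpenAIModelName (sketch; requires the openai package and credentials at runtime)
model = OpenAIResponsesModel('o1-pro')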

pydantic_ai_slim/pydantic_ai/usage.py

Lines changed: 13 additions & 7 deletions
@@ -57,13 +57,19 @@ def __add__(self, other: Usage) -> Usage:

     def opentelemetry_attributes(self) -> dict[str, int]:
         """Get the token limits as OpenTelemetry attributes."""
-        result = {
-            'gen_ai.usage.input_tokens': self.request_tokens,
-            'gen_ai.usage.output_tokens': self.response_tokens,
-        }
-        for key, value in (self.details or {}).items():
-            result[f'gen_ai.usage.details.{key}'] = value  # pragma: no cover
-        return {k: v for k, v in result.items() if v}
+        result: dict[str, int] = {}
+        if self.request_tokens:
+            result['gen_ai.usage.input_tokens'] = self.request_tokens
+        if self.response_tokens:
+            result['gen_ai.usage.output_tokens'] = self.response_tokens
+        details = self.details
+        if details:
+            prefix = 'gen_ai.usage.details.'
+            for key, value in details.items():
+                # Skipping check for value since spec implies all detail values are relevant
+                if value:
+                    result[prefix + key] = value
+        return result

     def has_values(self) -> bool:
         """Whether any values are set and non-zero."""

tests/test_agent.py

Lines changed: 12 additions & 0 deletions
@@ -2673,6 +2673,18 @@ def test_double_capture_run_messages() -> None:
     )


+def test_capture_run_messages_with_user_exception_does_not_contain_internal_errors() -> None:
+    """Test that user exceptions within capture_run_messages context have clean stack traces."""
+    agent = Agent('test')
+
+    try:
+        with capture_run_messages():
+            agent.run_sync('Hello')
+            raise ZeroDivisionError('division by zero')
+    except Exception as e:
+        assert e.__context__ is None
+
+
 def test_dynamic_false_no_reevaluate():
     """When dynamic is false (default), the system prompt is not reevaluated
     i.e: SystemPromptPart(

tests/test_usage_limits.py

Lines changed: 9 additions & 0 deletions
@@ -176,6 +176,15 @@ async def delegate_to_other_agent2(ctx: RunContext[None], sentence: str) -> int:
     # confirm the usage from result2 is the sum of the usage from result1
     assert result2.usage() == functools.reduce(operator.add, run_1_usages)

+    result1_usage = result1.usage()
+    result1_usage.details = {'custom1': 10, 'custom2': 20, 'custom3': 0}
+    assert result1_usage.opentelemetry_attributes() == {
+        'gen_ai.usage.input_tokens': 103,
+        'gen_ai.usage.output_tokens': 13,
+        'gen_ai.usage.details.custom1': 10,
+        'gen_ai.usage.details.custom2': 20,
+    }
+

 async def test_multi_agent_usage_sync():
     """As in `test_multi_agent_usage_async`, with a sync tool."""
