Skip to content

Commit a949ddd — Merge branch 'main' into dmontagu/new-graph-api
2 parents: 00b98db + 6cf43ea

28 files changed: +2327 −206 lines

docs/logfire.md — 8 additions, 1 deletion

@@ -268,7 +268,14 @@ The following providers have dedicated documentation on Pydantic AI:
 
 ### Configuring data format
 
-Pydantic AI follows the [OpenTelemetry Semantic Conventions for Generative AI systems](https://opentelemetry.io/docs/specs/semconv/gen-ai/). Specifically, it follows version 1.37.0 of the conventions by default. To use [version 1.36.0](https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/gen-ai/README.md) or older, pass [`InstrumentationSettings(version=1)`][pydantic_ai.models.instrumented.InstrumentationSettings] (the default is `version=2`). Moreover, those semantic conventions specify that messages should be captured as individual events (logs) that are children of the request span, whereas by default, Pydantic AI instead collects these events into a JSON array which is set as a single large attribute called `events` on the request span. To change this, use `event_mode='logs'`:
+Pydantic AI follows the [OpenTelemetry Semantic Conventions for Generative AI systems](https://opentelemetry.io/docs/specs/semconv/gen-ai/). Specifically, it follows version 1.37.0 of the conventions by default, with a few exceptions. Certain span and attribute names are not spec compliant by default for compatibility reasons, but can be made compliant by passing [`InstrumentationSettings(version=3)`][pydantic_ai.models.instrumented.InstrumentationSettings] (the default is currently `version=2`). This will change the following:
+
+- The span name `agent run` becomes `invoke_agent {gen_ai.agent.name}` (with the agent name filled in)
+- The span name `running tool` becomes `execute_tool {gen_ai.tool.name}` (with the tool name filled in)
+- The attribute name `tool_arguments` becomes `gen_ai.tool.call.arguments`
+- The attribute name `tool_response` becomes `gen_ai.tool.call.result`
+
+To use [OpenTelemetry semantic conventions version 1.36.0](https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/gen-ai/README.md) or older, pass [`InstrumentationSettings(version=1)`][pydantic_ai.models.instrumented.InstrumentationSettings]. Moreover, those semantic conventions specify that messages should be captured as individual events (logs) that are children of the request span, whereas by default, Pydantic AI instead collects these events into a JSON array which is set as a single large attribute called `events` on the request span. To change this, use `event_mode='logs'`:
 
 ```python {title="instrumentation_settings_event_mode.py"}
 import logfire

pydantic_ai_slim/pydantic_ai/_agent_graph.py — 13 additions, 12 deletions
(indentation below reconstructed from Python nesting; content lines verbatim)

@@ -796,16 +796,14 @@ async def process_tool_calls(  # noqa: C901
     # Then, we handle function tool calls
     calls_to_run: list[_messages.ToolCallPart] = []
     if final_result and ctx.deps.end_strategy == 'early':
-        output_parts.extend(
-            [
+        for call in tool_calls_by_kind['function']:
+            output_parts.append(
                 _messages.ToolReturnPart(
                     tool_name=call.tool_name,
                     content='Tool not executed - a final result was already processed.',
                     tool_call_id=call.tool_call_id,
                 )
-                for call in tool_calls_by_kind['function']
-            ]
-        )
+            )
     else:
         calls_to_run.extend(tool_calls_by_kind['function'])
 
@@ -851,14 +849,17 @@ async def process_tool_calls(  # noqa: C901
     if tool_call_results is None:
         calls = [*tool_calls_by_kind['external'], *tool_calls_by_kind['unapproved']]
         if final_result:
-            for call in calls:
-                output_parts.append(
-                    _messages.ToolReturnPart(
-                        tool_name=call.tool_name,
-                        content='Tool not executed - a final result was already processed.',
-                        tool_call_id=call.tool_call_id,
+            # If the run was already determined to end on deferred tool calls,
+            # we shouldn't insert return parts as the deferred tools will still get a real result.
+            if not isinstance(final_result.output, _output.DeferredToolRequests):
+                for call in calls:
+                    output_parts.append(
+                        _messages.ToolReturnPart(
+                            tool_name=call.tool_name,
+                            content='Tool not executed - a final result was already processed.',
+                            tool_call_id=call.tool_call_id,
+                        )
                 )
-            )
         elif calls:
             deferred_calls['external'].extend(tool_calls_by_kind['external'])
             deferred_calls['unapproved'].extend(tool_calls_by_kind['unapproved'])

pydantic_ai_slim/pydantic_ai/_cli.py — 3 additions, 3 deletions

@@ -228,15 +228,15 @@ async def run_chat(
     prog_name: str,
     config_dir: Path | None = None,
     deps: AgentDepsT = None,
-    message_history: list[ModelMessage] | None = None,
+    message_history: Sequence[ModelMessage] | None = None,
 ) -> int:
     prompt_history_path = (config_dir or PYDANTIC_AI_HOME) / PROMPT_HISTORY_FILENAME
     prompt_history_path.parent.mkdir(parents=True, exist_ok=True)
     prompt_history_path.touch(exist_ok=True)
     session: PromptSession[Any] = PromptSession(history=FileHistory(str(prompt_history_path)))
 
     multiline = False
-    messages: list[ModelMessage] = message_history[:] if message_history else []
+    messages: list[ModelMessage] = list(message_history) if message_history else []
 
     while True:
         try:
@@ -272,7 +272,7 @@ async def ask_agent(
     console: Console,
     code_theme: str,
     deps: AgentDepsT = None,
-    messages: list[ModelMessage] | None = None,
+    messages: Sequence[ModelMessage] | None = None,
 ) -> list[ModelMessage]:
     status = Status('[dim]Working on it…[/dim]', console=console)
 
pydantic_ai_slim/pydantic_ai/agent/__init__.py — 33 additions, 9 deletions

@@ -342,6 +342,7 @@ def __init__(
 
         self._event_stream_handler = event_stream_handler
 
+        self._override_name: ContextVar[_utils.Option[str]] = ContextVar('_override_name', default=None)
         self._override_deps: ContextVar[_utils.Option[AgentDepsT]] = ContextVar('_override_deps', default=None)
         self._override_model: ContextVar[_utils.Option[models.Model]] = ContextVar('_override_model', default=None)
         self._override_toolsets: ContextVar[_utils.Option[Sequence[AbstractToolset[AgentDepsT]]]] = ContextVar(
@@ -382,7 +383,8 @@ def name(self) -> str | None:
 
         If `None`, we try to infer the agent name from the call frame when the agent is first run.
         """
-        return self._name
+        name_ = self._override_name.get()
+        return name_.value if name_ else self._name
 
     @name.setter
     def name(self, value: str | None) -> None:
@@ -413,7 +415,7 @@ def iter(
         user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: None = None,
-        message_history: list[_messages.ModelMessage] | None = None,
+        message_history: Sequence[_messages.ModelMessage] | None = None,
         deferred_tool_results: DeferredToolResults | None = None,
         model: models.Model | models.KnownModelName | str | None = None,
         deps: AgentDepsT = None,
@@ -422,6 +424,7 @@ def iter(
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
     ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, OutputDataT]]: ...
 
     @overload
@@ -430,7 +433,7 @@ def iter(
         user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: OutputSpec[RunOutputDataT],
-        message_history: list[_messages.ModelMessage] | None = None,
+        message_history: Sequence[_messages.ModelMessage] | None = None,
         deferred_tool_results: DeferredToolResults | None = None,
         model: models.Model | models.KnownModelName | str | None = None,
         deps: AgentDepsT = None,
@@ -439,6 +442,7 @@ def iter(
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
     ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, RunOutputDataT]]: ...
 
     @asynccontextmanager
@@ -447,7 +451,7 @@ async def iter(
         user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: OutputSpec[RunOutputDataT] | None = None,
-        message_history: list[_messages.ModelMessage] | None = None,
+        message_history: Sequence[_messages.ModelMessage] | None = None,
         deferred_tool_results: DeferredToolResults | None = None,
         model: models.Model | models.KnownModelName | str | None = None,
         deps: AgentDepsT = None,
@@ -456,6 +460,7 @@ async def iter(
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
     ) -> AsyncIterator[AgentRun[AgentDepsT, Any]]:
         """A contextmanager which can be used to iterate over the agent graph's nodes as they are executed.
 
@@ -528,6 +533,7 @@ async def main():
             usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
             infer_name: Whether to try to infer the agent name from the call frame if it's not set.
             toolsets: Optional additional toolsets for this run.
+            builtin_tools: Optional additional builtin tools for this run.
 
         Returns:
             The result of the run.
@@ -562,7 +568,7 @@ async def main():
         # Build the initial state
         usage = usage or _usage.RunUsage()
         state = _agent_graph.GraphAgentState(
-            message_history=message_history[:] if message_history else [],
+            message_history=list(message_history) if message_history else [],
             usage=usage,
             retries=0,
             run_step=0,
@@ -597,7 +603,16 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
         else:
            instrumentation_settings = None
            tracer = NoOpTracer()
-
+        if builtin_tools:
+            # Deduplicate builtin tools passed to the agent and the run based on type
+            builtin_tools = list(
+                {
+                    **({type(tool): tool for tool in self._builtin_tools or []}),
+                    **({type(tool): tool for tool in builtin_tools}),
+                }.values()
+            )
+        else:
+            builtin_tools = list(self._builtin_tools)
         graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunOutputDataT](
             user_deps=deps,
             prompt=user_prompt,
@@ -610,7 +625,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
             output_schema=output_schema,
             output_validators=output_validators,
             history_processors=self.history_processors,
-            builtin_tools=list(self._builtin_tools),
+            builtin_tools=builtin_tools,
             tool_manager=tool_manager,
             tracer=tracer,
             get_instructions=get_instructions,
@@ -686,7 +701,7 @@ def _run_span_end_attributes(
             }
         else:
             attrs = {
-                'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(state.message_history)),
+                'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(list(state.message_history))),
                 **settings.system_instructions_attributes(literal_instructions),
             }
 
@@ -708,24 +723,31 @@ def override(
     def override(
         self,
         *,
+        name: str | _utils.Unset = _utils.UNSET,
         deps: AgentDepsT | _utils.Unset = _utils.UNSET,
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.
+        """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions.
 
         This is particularly useful when testing.
         You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
 
         Args:
+            name: The name to use instead of the name passed to the agent constructor and agent run.
            deps: The dependencies to use instead of the dependencies passed to the agent run.
            model: The model to use instead of the model passed to the agent run.
            toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
            tools: The tools to use instead of the tools registered with the agent.
            instructions: The instructions to use instead of the instructions registered with the agent.
         """
+        if _utils.is_set(name):
+            name_token = self._override_name.set(_utils.Some(name))
+        else:
+            name_token = None
+
         if _utils.is_set(deps):
             deps_token = self._override_deps.set(_utils.Some(deps))
         else:
@@ -755,6 +777,8 @@ def override(
         try:
             yield
         finally:
+            if name_token is not None:
+                self._override_name.reset(name_token)
             if deps_token is not None:
                 self._override_deps.reset(deps_token)
             if model_token is not None:

0 commit comments

Comments
 (0)