diff --git a/docs/builtin-tools.md b/docs/builtin-tools.md index d755702127..bf77f9628a 100644 --- a/docs/builtin-tools.md +++ b/docs/builtin-tools.md @@ -11,6 +11,7 @@ Pydantic AI supports the following built-in tools: - **[`ImageGenerationTool`][pydantic_ai.builtin_tools.ImageGenerationTool]**: Enables agents to generate images - **[`UrlContextTool`][pydantic_ai.builtin_tools.UrlContextTool]**: Enables agents to pull URL contents into their context - **[`MemoryTool`][pydantic_ai.builtin_tools.MemoryTool]**: Enables agents to use memory +- **[`MCPServerTool`][pydantic_ai.builtin_tools.MCPServerTool]**: Enables agents to use remote MCP servers with communication handled by the model provider These tools are passed to the agent via the `builtin_tools` parameter and are executed by the model provider's infrastructure. @@ -52,7 +53,7 @@ print(result.output) _(This example is complete, it can be run "as is")_ -With OpenAI, you must use their responses API to access the web search tool. +With OpenAI, you must use their Responses API to access the web search tool. ```py {title="web_search_openai.py"} from pydantic_ai import Agent, WebSearchTool @@ -419,6 +420,149 @@ print(result.output) _(This example is complete, it can be run "as is")_ +## MCP Server Tool + +The [`MCPServerTool`][pydantic_ai.builtin_tools.MCPServerTool] allows your agent to use remote MCP servers with communication handled by the model provider. + +This requires the MCP server to live at a public URL the provider can reach and does not support many of the advanced features of Pydantic AI's agent-side [MCP support](mcp/client.md), +but can result in optimized context use and caching, and faster performance due to the lack of a round-trip back to Pydantic AI. + +### Provider Support + +| Provider | Supported | Notes | +|----------|-----------|-----------------------| +| OpenAI Responses | ✅ | Full feature support. [Connectors](https://platform.openai.com/docs/guides/tools-connectors-mcp#connectors) can be used by specifying a special `x-openai-connector:` URL. | +| Anthropic | ✅ | Full feature support | +| Google | ❌ | Not supported | +| Groq | ❌ | Not supported | +| OpenAI Chat Completions | ❌ | Not supported | +| Bedrock | ❌ | Not supported | +| Mistral | ❌ | Not supported | +| Cohere | ❌ | Not supported | +| HuggingFace | ❌ | Not supported | + +### Usage + +```py {title="mcp_server_anthropic.py"} +from pydantic_ai import Agent, MCPServerTool + +agent = Agent( + 'anthropic:claude-sonnet-4-5', + builtin_tools=[ + MCPServerTool( + id='deepwiki', + url='https://mcp.deepwiki.com/mcp', # (1) + ) + ] +) + +result = agent.run_sync('Tell me about the pydantic/pydantic-ai repo.') +print(result.output) +""" +The pydantic/pydantic-ai repo is a Python agent framework for building Generative AI applications. +""" +``` + +1. The [DeepWiki MCP server](https://docs.devin.ai/work-with-devin/deepwiki-mcp) does not require authorization. + +_(This example is complete, it can be run "as is")_ + +With OpenAI, you must use their Responses API to access the MCP server tool: + +```py {title="mcp_server_openai.py"} +from pydantic_ai import Agent, MCPServerTool + +agent = Agent( + 'openai-responses:gpt-5', + builtin_tools=[ + MCPServerTool( + id='deepwiki', + url='https://mcp.deepwiki.com/mcp', # (1) + ) + ] +) + +result = agent.run_sync('Tell me about the pydantic/pydantic-ai repo.') +print(result.output) +""" +The pydantic/pydantic-ai repo is a Python agent framework for building Generative AI applications. +""" +``` + +1. 
The [DeepWiki MCP server](https://docs.devin.ai/work-with-devin/deepwiki-mcp) does not require authorization.
+
+_(This example is complete, it can be run "as is")_
+
+### Configuration Options
+
+The `MCPServerTool` supports several configuration parameters for custom MCP servers:
+
+```py {title="mcp_server_configured_url.py"}
+import os
+
+from pydantic_ai import Agent, MCPServerTool
+
+agent = Agent(
+    'openai-responses:gpt-5',
+    builtin_tools=[
+        MCPServerTool(
+            id='github',
+            url='https://api.githubcopilot.com/mcp/',
+            authorization_token=os.getenv('GITHUB_ACCESS_TOKEN', 'mock-access-token'),  # (1)
+            allowed_tools=['search_repositories', 'list_commits'],
+            description='GitHub MCP server',
+            headers={'X-Custom-Header': 'custom-value'},
+        )
+    ]
+)
+
+result = agent.run_sync('Tell me about the pydantic/pydantic-ai repo.')
+print(result.output)
+"""
+The pydantic/pydantic-ai repo is a Python agent framework for building Generative AI applications.
+"""
+```
+
+1. The [GitHub MCP server](https://github.com/github/github-mcp-server) requires an authorization token.
+
+_(This example is complete, it can be run "as is")_
+
+For OpenAI Responses, you can use a [connector](https://platform.openai.com/docs/guides/tools-connectors-mcp#connectors) by specifying a special `x-openai-connector:` URL:
+
+```py {title="mcp_server_configured_connector_id.py"}
+import os
+
+from pydantic_ai import Agent, MCPServerTool
+
+agent = Agent(
+    'openai-responses:gpt-5',
+    builtin_tools=[
+        MCPServerTool(
+            id='google-calendar',
+            url='x-openai-connector:connector_googlecalendar',
+            authorization_token=os.getenv('GOOGLE_API_KEY', 'mock-api-key'),  # (1)
+        )
+    ]
+)
+
+result = agent.run_sync('What do I have on my calendar today?')
+print(result.output)
+#> You're going to spend all day playing with Pydantic AI.
+```
+
+1. OpenAI's Google Calendar connector requires an [authorization token](https://platform.openai.com/docs/guides/tools-connectors-mcp#authorizing-a-connector).
+
+_(This example is complete, it can be run "as is")_
+
+#### Provider Support
+
+| Parameter             | OpenAI | Anthropic |
+|-----------------------|--------|-----------|
+| `authorization_token` | ✅     | ✅        |
+| `allowed_tools`       | ✅     | ✅        |
+| `description`         | ✅     | ❌        |
+| `headers`             | ✅     | ❌        |
+
 ## API Reference

 For complete API documentation, see the [API Reference](api/builtin_tools.md).
diff --git a/docs/mcp/overview.md b/docs/mcp/overview.md
index 5baff413ca..ce00808fa6 100644
--- a/docs/mcp/overview.md
+++ b/docs/mcp/overview.md
@@ -2,7 +2,9 @@

 Pydantic AI supports [Model Context Protocol (MCP)](https://modelcontextprotocol.io) in two ways:

-1. [Agents](../agents.md) act as an MCP Client, connecting to MCP servers to use their tools, [learn more …](client.md)
+1. [Agents](../agents.md) can connect to MCP servers and use their tools:
+    1. Pydantic AI can act as an MCP client and connect directly to local and remote MCP servers, [learn more …](client.md)
+    2. Some model providers can themselves connect to remote MCP servers, [learn more …](../builtin-tools.md#mcp-server-tool)
 2. Agents can be used within MCP servers, [learn more …](server.md)

 ## What is MCP?
diff --git a/docs/models/google.md b/docs/models/google.md
index 986920245e..04914edeb4 100644
--- a/docs/models/google.md
+++ b/docs/models/google.md
@@ -140,7 +140,7 @@ agent = Agent(model)

 #### Customizing Location or Project

-You can specify the location and/or projectwhen using Vertex AI:
+You can specify the location and/or project when using Vertex AI:

 ```python {title="google_model_location.py" test="skip"}
 from pydantic_ai import Agent
diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py
index 77b605b5ee..9e67fa680b 100644
--- a/pydantic_ai_slim/pydantic_ai/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/__init__.py
@@ -12,6 +12,7 @@
 from .builtin_tools import (
     CodeExecutionTool,
     ImageGenerationTool,
+    MCPServerTool,
     MemoryTool,
     UrlContextTool,
     WebSearchTool,
@@ -213,6 +214,7 @@
     'CodeExecutionTool',
     'ImageGenerationTool',
     'MemoryTool',
+    'MCPServerTool',
     # output
     'ToolOutput',
     'NativeOutput',
diff --git a/pydantic_ai_slim/pydantic_ai/builtin_tools.py b/pydantic_ai_slim/pydantic_ai/builtin_tools.py
index 75e3d582e2..731594eb25 100644
--- a/pydantic_ai_slim/pydantic_ai/builtin_tools.py
+++ b/pydantic_ai_slim/pydantic_ai/builtin_tools.py
@@ -16,6 +16,7 @@
     'UrlContextTool',
     'ImageGenerationTool',
     'MemoryTool',
+    'MCPServerTool',
 )

 _BUILTIN_TOOL_TYPES: dict[str, type[AbstractBuiltinTool]] = {}
@@ -263,6 +264,64 @@ class MemoryTool(AbstractBuiltinTool):
     """The kind of tool."""


+@dataclass(kw_only=True)
+class MCPServerTool(AbstractBuiltinTool):
+    """A builtin tool that allows your agent to use remote MCP servers, with communication handled by the model provider.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Anthropic
+    """
+
+    id: str
+    """The ID of the MCP server."""
+
+    url: str
+    """The URL of the MCP server to use.
+
+    For OpenAI Responses, a connector ID can be used instead of a URL by passing it as `x-openai-connector:<connector_id>`.
+    """
+
+    authorization_token: str | None = None
+    """Authorization header to use when making requests to the MCP server.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Anthropic
+    """
+
+    description: str | None = None
+    """A description of the MCP server.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    allowed_tools: list[str] | None = None
+    """A list of the MCP server's tools that the model is allowed to use.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Anthropic
+    """
+
+    headers: dict[str, str] | None = None
+    """Optional HTTP headers to send to the MCP server.
+
+    Use for authentication or other purposes.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    kind: str = 'mcp_server'
+
+
 def _tool_discriminator(tool_data: dict[str, Any] | AbstractBuiltinTool) -> str:
     if isinstance(tool_data, dict):
         return tool_data.get('kind', AbstractBuiltinTool.kind)
diff --git a/pydantic_ai_slim/pydantic_ai/models/anthropic.py b/pydantic_ai_slim/pydantic_ai/models/anthropic.py
index 1511f724bc..bbedf06a10 100644
--- a/pydantic_ai_slim/pydantic_ai/models/anthropic.py
+++ b/pydantic_ai_slim/pydantic_ai/models/anthropic.py
@@ -3,7 +3,7 @@
 import io
 from collections.abc import AsyncGenerator, AsyncIterable, AsyncIterator
 from contextlib import asynccontextmanager
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, replace
 from datetime import datetime
 from typing import Any, Literal, cast, overload

@@ -13,7 +13,7 @@
 from ..
import ModelHTTPError, UnexpectedModelBehavior, _utils, usage from .._run_context import RunContext from .._utils import guard_tool_call_id as _guard_tool_call_id -from ..builtin_tools import CodeExecutionTool, MemoryTool, WebSearchTool +from ..builtin_tools import CodeExecutionTool, MCPServerTool, MemoryTool, WebSearchTool from ..exceptions import UserError from ..messages import ( BinaryContent, @@ -68,6 +68,9 @@ BetaContentBlockParam, BetaImageBlockParam, BetaInputJSONDelta, + BetaMCPToolResultBlock, + BetaMCPToolUseBlock, + BetaMCPToolUseBlockParam, BetaMemoryTool20250818Param, BetaMessage, BetaMessageParam, @@ -82,6 +85,8 @@ BetaRawMessageStreamEvent, BetaRedactedThinkingBlock, BetaRedactedThinkingBlockParam, + BetaRequestMCPServerToolConfigurationParam, + BetaRequestMCPServerURLDefinitionParam, BetaServerToolUseBlock, BetaServerToolUseBlockParam, BetaSignatureDelta, @@ -264,7 +269,7 @@ async def _messages_create( ) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]: # standalone function to make it easier to override tools = self._get_tools(model_request_parameters) - tools, beta_features = self._add_builtin_tools(tools, model_request_parameters) + tools, mcp_servers, beta_features = self._add_builtin_tools(tools, model_request_parameters) tool_choice: BetaToolChoiceParam | None @@ -300,6 +305,7 @@ async def _messages_create( model=self._model_name, tools=tools or OMIT, tool_choice=tool_choice or OMIT, + mcp_servers=mcp_servers or OMIT, stream=stream, thinking=model_settings.get('anthropic_thinking', OMIT), stop_sequences=model_settings.get('stop_sequences', OMIT), @@ -333,6 +339,10 @@ def _process_response(self, response: BetaMessage) -> ModelResponse: ) elif isinstance(item, BetaThinkingBlock): items.append(ThinkingPart(content=item.thinking, signature=item.signature, provider_name=self.system)) + elif isinstance(item, BetaMCPToolUseBlock): + items.append(_map_mcp_server_use_block(item, self.system)) + elif isinstance(item, BetaMCPToolResultBlock): + items.append(_map_mcp_server_result_block(item, self.system)) else: assert isinstance(item, BetaToolUseBlock), f'unexpected item type {type(item)}' items.append( @@ -383,8 +393,9 @@ def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[B def _add_builtin_tools( self, tools: list[BetaToolUnionParam], model_request_parameters: ModelRequestParameters - ) -> tuple[list[BetaToolUnionParam], list[str]]: + ) -> tuple[list[BetaToolUnionParam], list[BetaRequestMCPServerURLDefinitionParam], list[str]]: beta_features: list[str] = [] + mcp_servers: list[BetaRequestMCPServerURLDefinitionParam] = [] for tool in model_request_parameters.builtin_tools: if isinstance(tool, WebSearchTool): user_location = UserLocation(type='approximate', **tool.user_location) if tool.user_location else None @@ -408,11 +419,26 @@ def _add_builtin_tools( tools = [tool for tool in tools if tool['name'] != 'memory'] tools.append(BetaMemoryTool20250818Param(name='memory', type='memory_20250818')) beta_features.append('context-management-2025-06-27') + elif isinstance(tool, MCPServerTool) and tool.url: + mcp_server_url_definition_param = BetaRequestMCPServerURLDefinitionParam( + type='url', + name=tool.id, + url=tool.url, + ) + if tool.allowed_tools is not None: # pragma: no branch + mcp_server_url_definition_param['tool_configuration'] = BetaRequestMCPServerToolConfigurationParam( + enabled=bool(tool.allowed_tools), + allowed_tools=tool.allowed_tools, + ) + if tool.authorization_token: # pragma: no cover + 
mcp_server_url_definition_param['authorization_token'] = tool.authorization_token + mcp_servers.append(mcp_server_url_definition_param) + beta_features.append('mcp-client-2025-04-04') else: # pragma: no cover raise UserError( f'`{tool.__class__.__name__}` is not supported by `AnthropicModel`. If it should be, please file an issue.' ) - return tools, beta_features + return tools, mcp_servers, beta_features async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[BetaMessageParam]]: # noqa: C901 """Just maps a `pydantic_ai.Message` to a `anthropic.types.MessageParam`.""" @@ -458,6 +484,8 @@ async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[Be | BetaCodeExecutionToolResultBlockParam | BetaThinkingBlockParam | BetaRedactedThinkingBlockParam + | BetaMCPToolUseBlockParam + | BetaMCPToolResultBlock ] = [] for response_part in m.parts: if isinstance(response_part, TextPart): @@ -508,7 +536,7 @@ async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[Be input=response_part.args_as_dict(), ) assistant_content_params.append(server_tool_use_block_param) - elif response_part.tool_name == CodeExecutionTool.kind: # pragma: no branch + elif response_part.tool_name == CodeExecutionTool.kind: server_tool_use_block_param = BetaServerToolUseBlockParam( id=tool_use_id, type='server_tool_use', @@ -516,6 +544,21 @@ async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[Be input=response_part.args_as_dict(), ) assistant_content_params.append(server_tool_use_block_param) + elif ( + response_part.tool_name == MCPServerTool.kind + and (args := response_part.args_as_dict()) + and (server_id := args.get('server_id')) + and (tool_name := args.get('tool_name')) + and (tool_args := args.get('tool_args')) + ): # pragma: no branch + mcp_tool_use_block_param = BetaMCPToolUseBlockParam( + id=tool_use_id, + type='mcp_tool_use', + server_name=server_id, + name=tool_name, + input=tool_args, + ) + assistant_content_params.append(mcp_tool_use_block_param) elif isinstance(response_part, BuiltinToolReturnPart): if response_part.provider_name == self.system: tool_use_id = _guard_tool_call_id(t=response_part) @@ -547,6 +590,16 @@ async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[Be ), ) ) + elif response_part.tool_name == MCPServerTool.kind and isinstance( + response_part.content, dict + ): # pragma: no branch + assistant_content_params.append( + BetaMCPToolResultBlock( + tool_use_id=tool_use_id, + type='mcp_tool_result', + **cast(dict[str, Any], response_part.content), # pyright: ignore[reportUnknownMemberType] + ) + ) elif isinstance(response_part, FilePart): # pragma: no cover # Files generated by models are not sent back to models that don't themselves generate files. 
pass @@ -712,6 +765,30 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: vendor_part_id=event.index, part=_map_code_execution_tool_result_block(current_block, self.provider_name), ) + elif isinstance(current_block, BetaMCPToolUseBlock): + call_part = _map_mcp_server_use_block(current_block, self.provider_name) + + args_json = call_part.args_as_json_str() + # Drop the final `{}}` so that we can add tool args deltas + args_json_delta = args_json[:-3] + assert args_json_delta.endswith('"tool_args":'), ( + f'Expected {args_json_delta!r} to end in `"tool_args":`' + ) + + yield self._parts_manager.handle_part( + vendor_part_id=event.index, part=replace(call_part, args=None) + ) + maybe_event = self._parts_manager.handle_tool_call_delta( + vendor_part_id=event.index, + args=args_json_delta, + ) + if maybe_event is not None: # pragma: no branch + yield maybe_event + elif isinstance(current_block, BetaMCPToolResultBlock): + yield self._parts_manager.handle_part( + vendor_part_id=event.index, + part=_map_mcp_server_result_block(current_block, self.provider_name), + ) elif isinstance(event, BetaRawContentBlockDeltaEvent): if isinstance(event.delta, BetaTextDelta): @@ -749,7 +826,16 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: self.provider_details = {'finish_reason': raw_finish_reason} self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) - elif isinstance(event, BetaRawContentBlockStopEvent | BetaRawMessageStopEvent): # pragma: no branch + elif isinstance(event, BetaRawContentBlockStopEvent): # pragma: no branch + if isinstance(current_block, BetaMCPToolUseBlock): + maybe_event = self._parts_manager.handle_tool_call_delta( + vendor_part_id=event.index, + args='}', + ) + if maybe_event is not None: # pragma: no branch + yield maybe_event + current_block = None + elif isinstance(event, BetaRawMessageStopEvent): # pragma: no branch current_block = None @property @@ -817,3 +903,26 @@ def _map_code_execution_tool_result_block( content=code_execution_tool_result_content_ta.dump_python(item.content, mode='json'), tool_call_id=item.tool_use_id, ) + + +def _map_mcp_server_use_block(item: BetaMCPToolUseBlock, provider_name: str) -> BuiltinToolCallPart: + return BuiltinToolCallPart( + provider_name=provider_name, + tool_name=MCPServerTool.kind, + args={ + 'action': 'call_tool', + 'server_id': item.server_name, + 'tool_name': item.name, + 'tool_args': cast(dict[str, Any], item.input), + }, + tool_call_id=item.id, + ) + + +def _map_mcp_server_result_block(item: BetaMCPToolResultBlock, provider_name: str) -> BuiltinToolReturnPart: + return BuiltinToolReturnPart( + provider_name=provider_name, + tool_name=MCPServerTool.kind, + content=item.model_dump(mode='json', include={'content', 'is_error'}), + tool_call_id=item.tool_use_id, + ) diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py index 84c566c23f..09d8fb1420 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openai.py +++ b/pydantic_ai_slim/pydantic_ai/models/openai.py @@ -1,6 +1,7 @@ from __future__ import annotations as _annotations import base64 +import json import warnings from collections.abc import AsyncIterable, AsyncIterator, Sequence from contextlib import asynccontextmanager @@ -17,7 +18,7 @@ from .._run_context import RunContext from .._thinking_part import split_content_into_text_and_thinking from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc, number_to_datetime -from ..builtin_tools 
import CodeExecutionTool, ImageGenerationTool, WebSearchTool +from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, MCPServerTool, WebSearchTool from ..exceptions import UserError from ..messages import ( AudioUrl, @@ -109,6 +110,11 @@ allows this model to be used more easily with other model types (ie, Ollama, Deepseek). """ +MCP_SERVER_TOOL_CONNECTOR_URI_SCHEME: Literal['x-openai-connector'] = 'x-openai-connector' +""" +Prefix for OpenAI connector IDs. OpenAI supports either a URL or a connector ID when passing MCP configuration to a model, +by using that prefix like `x-openai-connector:` in a URL, you can pass a connector ID to a model. +""" _CHAT_FINISH_REASON_MAP: dict[ Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call'], FinishReason @@ -1061,13 +1067,16 @@ def _process_response( # noqa: C901 elif isinstance(item, responses.ResponseFileSearchToolCall): # pragma: no cover # Pydantic AI doesn't yet support the FileSearch built-in tool pass - elif isinstance( # pragma: no cover - item, - responses.response_output_item.McpCall - | responses.response_output_item.McpListTools - | responses.response_output_item.McpApprovalRequest, - ): - # Pydantic AI supports MCP natively + elif isinstance(item, responses.response_output_item.McpCall): + call_part, return_part = _map_mcp_call(item, self.system) + items.append(call_part) + items.append(return_part) + elif isinstance(item, responses.response_output_item.McpListTools): + call_part, return_part = _map_mcp_list_tools(item, self.system) + items.append(call_part) + items.append(return_part) + elif isinstance(item, responses.response_output_item.McpApprovalRequest): # pragma: no cover + # Pydantic AI doesn't yet support McpApprovalRequest (explicit tool usage approval) pass finish_reason: FinishReason | None = None @@ -1256,6 +1265,32 @@ def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) - elif isinstance(tool, CodeExecutionTool): has_image_generating_tool = True tools.append({'type': 'code_interpreter', 'container': {'type': 'auto'}}) + elif isinstance(tool, MCPServerTool): + mcp_tool = responses.tool_param.Mcp( + type='mcp', + server_label=tool.id, + require_approval='never', + ) + + if tool.authorization_token: # pragma: no branch + mcp_tool['authorization'] = tool.authorization_token + + if tool.allowed_tools is not None: # pragma: no branch + mcp_tool['allowed_tools'] = tool.allowed_tools + + if tool.description: # pragma: no branch + mcp_tool['server_description'] = tool.description + + if tool.headers: # pragma: no branch + mcp_tool['headers'] = tool.headers + + if tool.url.startswith(MCP_SERVER_TOOL_CONNECTOR_URI_SCHEME + ':'): + _, connector_id = tool.url.split(':', maxsplit=1) + mcp_tool['connector_id'] = connector_id # pyright: ignore[reportGeneralTypeIssues] + else: + mcp_tool['server_url'] = tool.url + + tools.append(mcp_tool) elif isinstance(tool, ImageGenerationTool): # pragma: no branch has_image_generating_tool = True tools.append( @@ -1428,7 +1463,7 @@ async def _map_messages( # noqa: C901 type='web_search_call', ) openai_messages.append(web_search_item) - elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id: # pragma: no branch + elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id: # The cast is necessary because of https://github.com/openai/openai-python/issues/2648 image_generation_item = cast( responses.response_input_item_param.ImageGenerationCall, @@ -1438,6 +1473,37 @@ async def _map_messages( # noqa: C901 }, ) 
openai_messages.append(image_generation_item) + elif ( # pragma: no branch + item.tool_name == MCPServerTool.kind + and item.tool_call_id + and (args := item.args_as_dict()) + and (action := args.get('action')) + and (server_id := args.get('server_id')) + ): + if action == 'list_tools': + mcp_list_tools_item = responses.response_input_item_param.McpListTools( + id=item.tool_call_id, + type='mcp_list_tools', + server_label=server_id, + tools=[], # These can be read server-side + ) + openai_messages.append(mcp_list_tools_item) + elif ( # pragma: no branch + action == 'call_tool' + and (tool_name := args.get('tool_name')) + and (tool_args := args.get('tool_args')) + ): + mcp_call_item = responses.response_input_item_param.McpCall( + id=item.tool_call_id, + server_label=server_id, + name=tool_name, + arguments=to_json(tool_args).decode(), + error=None, # These can be read server-side + output=None, # These can be read server-side + type='mcp_call', + ) + openai_messages.append(mcp_call_item) + elif isinstance(item, BuiltinToolReturnPart): if item.provider_name == self.system and send_item_ids: if ( @@ -1456,9 +1522,12 @@ async def _map_messages( # noqa: C901 and (status := content.get('status')) ): web_search_item['status'] = status - elif item.tool_name == ImageGenerationTool.kind: # pragma: no branch + elif item.tool_name == ImageGenerationTool.kind: # Image generation result does not need to be sent back, just the `id` off of `BuiltinToolCallPart`. pass + elif item.tool_name == MCPServerTool.kind: # pragma: no branch + # MCP call result does not need to be sent back, just the fields off of `BuiltinToolCallPart`. + pass elif isinstance(item, FilePart): # This was generated by the `ImageGenerationTool` or `CodeExecutionTool`, # and does not need to be sent back separately from the corresponding `BuiltinToolReturnPart`. 
@@ -1772,7 +1841,7 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
                     args_json = call_part.args_as_json_str()
                     # Drop the final `"}` so that we can add code deltas
                     args_json_delta = args_json[:-2]
-                    assert args_json_delta.endswith('code":"')
+                    assert args_json_delta.endswith('"code":"'), f'Expected {args_json_delta!r} to end in `"code":"`'

                     yield self._parts_manager.handle_part(
                         vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
@@ -1786,7 +1855,28 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
                 elif isinstance(chunk.item, responses.response_output_item.ImageGenerationCall):
                     call_part, _, _ = _map_image_generation_tool_call(chunk.item, self.provider_name)
                     yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-call', part=call_part)
+                elif isinstance(chunk.item, responses.response_output_item.McpCall):
+                    call_part, _ = _map_mcp_call(chunk.item, self.provider_name)
+                    args_json = call_part.args_as_json_str()
+                    # Drop the final `{}}` so that we can add tool args deltas
+                    args_json_delta = args_json[:-3]
+                    assert args_json_delta.endswith('"tool_args":'), (
+                        f'Expected {args_json_delta!r} to end in `"tool_args":`'
+                    )
+
+                    yield self._parts_manager.handle_part(
+                        vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                    )
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=f'{chunk.item.id}-call',
+                        args=args_json_delta,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
+                elif isinstance(chunk.item, responses.response_output_item.McpListTools):
+                    call_part, _ = _map_mcp_list_tools(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-call', part=call_part)
                 else:
                     warnings.warn(  # pragma: no cover
                         f'Handling of this item type is not yet implemented. 
Please report on our GitHub: {chunk}', @@ -1827,6 +1917,13 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-file', part=file_part) yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part) + elif isinstance(chunk.item, responses.response_output_item.McpCall): + _, return_part = _map_mcp_call(chunk.item, self.provider_name) + yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part) + elif isinstance(chunk.item, responses.response_output_item.McpListTools): + _, return_part = _map_mcp_list_tools(chunk.item, self.provider_name) + yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part) + elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent): yield self._parts_manager.handle_thinking_delta( vendor_part_id=f'{chunk.item_id}-{chunk.summary_index}', @@ -1921,6 +2018,40 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: ) yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item_id}-file', part=file_part) + elif isinstance(chunk, responses.ResponseMcpCallArgumentsDoneEvent): + maybe_event = self._parts_manager.handle_tool_call_delta( + vendor_part_id=f'{chunk.item_id}-call', + args='}', + ) + if maybe_event is not None: # pragma: no branch + yield maybe_event + + elif isinstance(chunk, responses.ResponseMcpCallArgumentsDeltaEvent): + maybe_event = self._parts_manager.handle_tool_call_delta( + vendor_part_id=f'{chunk.item_id}-call', + args=chunk.delta, + ) + if maybe_event is not None: # pragma: no branch + yield maybe_event + + elif isinstance(chunk, responses.ResponseMcpListToolsInProgressEvent): + pass # there's nothing we need to do here + + elif isinstance(chunk, responses.ResponseMcpListToolsCompletedEvent): + pass # there's nothing we need to do here + + elif isinstance(chunk, responses.ResponseMcpListToolsFailedEvent): # pragma: no cover + pass # there's nothing we need to do here + + elif isinstance(chunk, responses.ResponseMcpCallInProgressEvent): + pass # there's nothing we need to do here + + elif isinstance(chunk, responses.ResponseMcpCallFailedEvent): # pragma: no cover + pass # there's nothing we need to do here + + elif isinstance(chunk, responses.ResponseMcpCallCompletedEvent): + pass # there's nothing we need to do here + else: # pragma: no cover warnings.warn( f'Handling of this event type is not yet implemented. Please report on our GitHub: {chunk}', @@ -1990,7 +2121,6 @@ def _map_usage( def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]: # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields. # Before our `ToolCallPart` gained the `id` field alongside `tool_call_id` field, we combined the two fields into a single string stored on `tool_call_id`. 
- if '|' in combined_id: call_id, id = combined_id.split('|', 1) return call_id, id @@ -2030,7 +2160,7 @@ def _map_code_interpreter_tool_call( tool_call_id=item.id, args={ 'container_id': item.container_id, - 'code': item.code, + 'code': item.code or '', }, provider_name=provider_name, ), @@ -2122,3 +2252,49 @@ def _map_image_generation_tool_call( ), file_part, ) + + +def _map_mcp_list_tools( + item: responses.response_output_item.McpListTools, provider_name: str +) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]: + return ( + BuiltinToolCallPart( + tool_name=MCPServerTool.kind, + tool_call_id=item.id, + provider_name=provider_name, + args={'action': 'list_tools', 'server_id': item.server_label}, + ), + BuiltinToolReturnPart( + tool_name=MCPServerTool.kind, + tool_call_id=item.id, + content=item.model_dump(mode='json', include={'tools', 'error'}), + provider_name=provider_name, + ), + ) + + +def _map_mcp_call( + item: responses.response_output_item.McpCall, provider_name: str +) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]: + return ( + BuiltinToolCallPart( + tool_name=MCPServerTool.kind, + tool_call_id=item.id, + args={ + 'action': 'call_tool', + 'server_id': item.server_label, + 'tool_name': item.name, + 'tool_args': json.loads(item.arguments) if item.arguments else {}, + }, + provider_name=provider_name, + ), + BuiltinToolReturnPart( + tool_name=MCPServerTool.kind, + tool_call_id=item.id, + content={ + 'output': item.output, + 'error': item.error, + }, + provider_name=provider_name, + ), + ) diff --git a/tests/models/cassettes/test_anthropic/test_anthropic_mcp_servers.yaml b/tests/models/cassettes/test_anthropic/test_anthropic_mcp_servers.yaml new file mode 100644 index 0000000000..89eb7e0971 --- /dev/null +++ b/tests/models/cassettes/test_anthropic/test_anthropic_mcp_servers.yaml @@ -0,0 +1,401 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '346' + content-type: + - application/json + host: + - api.anthropic.com + method: POST + parsed_body: + max_tokens: 4096 + mcp_servers: + - name: deepwiki + type: url + url: https://mcp.deepwiki.com/mcp + messages: + - content: + - text: Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short + type: text + role: user + model: claude-sonnet-4-0 + stream: false + thinking: + budget_tokens: 3000 + type: enabled + uri: https://api.anthropic.com/v1/messages?beta=true + response: + headers: + connection: + - keep-alive + content-length: + - '6664' + content-type: + - application/json + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + content: + - signature: EqUDCkYICBgCKkCTiLjx5Rzw9zXo4pFDhFAc9Ci1R+d2fpkiqw7IPt1PgxBankr7bhRfh2iQOFEUy7sYVtsBxvnHW8zfBRxH1j6lEgySvdOyObrcFdJX3qkaDMAMCdLHIevZ/mSx/SIwi917U34N5jLQH1yMoCx/k72klLG5v42vcwUTG4ngKDI69Ddaf0eeDpgg3tL5FHfvKowCnslWg3Pd3ITe+TLlzu+OVZhRKU9SEwDJbjV7ZF954Ls6XExAfjdXhrhvXDB+hz6fZFPGFEfXV7jwElFT5HcGPWy84xvlwzbklZ2zH3XViik0B5dMErMAKs6IVwqXo3s+0p9xtX5gCBuvLkalET2upNsmdKGJv7WQWoaLch5N07uvSgWkO8AkGuVtBgqZH+uRGlPfYlnAgifNHu00GSAVK3beeyZfpnSQ6LQKcH+wVmrOi/3UvzA5f1LvsXG32gQKUCxztATnlBaI+7GMs1IAloaRHBndyRoe8Lwv79zZe9u9gnF9WCgK3yQsAR5hGZXlBKiIWfnRrXQ7QmA2hVO+mhEOCnz7OQkMIEUlfxgB + thinking: The user is asking about the pydantic/pydantic-ai repository and wants me to keep the answer short. I should + use the deepwiki tools to get information about this repository. 
Let me start by asking a general question about + what this repository is about. + type: thinking + - id: mcptoolu_01SAss3KEwASziHZoMR6HcZU + input: + question: What is pydantic-ai and what does this repository do? + repoName: pydantic/pydantic-ai + name: ask_question + server_name: deepwiki + type: mcp_tool_use + - content: + - text: | + Pydantic AI is a Python agent framework designed to simplify the development of production-grade applications using Generative AI . The repository contains this framework, which emphasizes an ergonomic developer experience and type-safety, similar to Pydantic and FastAPI . + + ## Core Functionality + + The `pydantic-ai` repository provides a comprehensive set of tools and abstractions for building AI agents . Its core components include: + + * **Type-safe agents**: Agents are defined using `Agent[Deps, Output]` for compile-time validation . + * **Model-agnostic design**: It supports over 15 LLM providers through a unified `Model` interface . + * **Structured outputs**: Automatic Pydantic validation and self-correction ensure structured outputs . + * **Observability**: Integration with OpenTelemetry and Logfire provides comprehensive observability . + * **Production-ready tooling**: This includes an evaluation framework, durable execution, and protocol integrations . + + ## Repository Structure + + The `pydantic-ai` repository is organized as a UV workspace monorepo, containing several packages : + + * `pydantic-ai-slim`: This is the core framework, containing modules for `Agent`, `Model`, and tools . + * `pydantic-graph`: Provides the graph execution engine . + * `pydantic-evals`: Offers an evaluation framework for datasets and evaluators . + * `examples`: Contains example applications . + * `clai`: A CLI interface for the framework . + * `pydantic-ai`: A meta-package that includes all optional extras . + + ## Agent Execution + + The `Agent` class is the primary interface for orchestrating AI tasks . It coordinates the model abstraction, message protocol, tool system, and graph execution . Agent execution uses a graph-based state machine from `pydantic_graph.Graph` . The execution flow involves three core node types : + + * `UserPromptNode`: Processes user input and creates initial `ModelRequest` . + * `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries . + * `CallToolsNode`: Executes tool functions via `RunContext[Deps]` . + + The `Agent` class provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution needs . + + ## Key Features + + ### Model Provider Support + Pydantic AI offers a unified `Model` abstract base class for various LLM providers, including OpenAI, Anthropic, Google, and more . All models implement `request(messages)` and `request_stream(messages)` methods . + + ### Tool System + The framework includes a tool system for registering and executing functions via decorators like `@agent.tool` . It automatically generates JSON schemas from function signatures and docstrings, validates tool call arguments, and injects `RunContext[Deps]` for dependencies . + + ### Observability + Pydantic AI integrates with OpenTelemetry, providing instrumentation for agent runs, model requests, and tool executions . It also has native integration with Pydantic Logfire for real-time debugging and performance monitoring . + + ### Evaluation Framework + The `pydantic-evals` package provides a framework for systematically testing and evaluating agent performance . 
It supports datasets with `Case` objects, various evaluators, and generates `EvaluationReport` with aggregated metrics . + + ## Integrations + Pydantic AI supports various integrations for development and production, including : + * **Model Context Protocol (MCP)**: For external tool server access . + * **AG-UI Protocol**: For interactive application frontends . + * **Agent2Agent (A2A)**: For multi-agent communication . + * **Temporal**: For durable workflow execution . + * **DBOS**: For database-backed execution . + + Notes: + The `pydantic-ai-slim` package is a core version of the framework, allowing users to install only necessary dependencies for specific models or features . The `pydantic-ai` package is a meta-package that includes all optional extras . + + Wiki pages you might want to explore: + - [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1) + + View this search on DeepWiki: https://deepwiki.com/search/what-is-pydanticai-and-what-do_2dc46d92-288e-470b-b52e-24bb13f839c7 + type: text + is_error: false + tool_use_id: mcptoolu_01SAss3KEwASziHZoMR6HcZU + type: mcp_tool_result + - text: |- + **Pydantic AI** is a Python agent framework for building production-grade applications with Generative AI. It provides: + + - **Type-safe agents** with compile-time validation using `Agent[Deps, Output]` + - **Model-agnostic design** supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) + - **Structured outputs** with automatic Pydantic validation and self-correction + - **Built-in observability** via OpenTelemetry and Logfire integration + - **Production tooling** including evaluation framework, durable execution, and tool system + + The repo is organized as a monorepo with core packages like `pydantic-ai-slim` (core framework), `pydantic-graph` (execution engine), and `pydantic-evals` (evaluation tools). It emphasizes developer ergonomics and type safety, similar to Pydantic and FastAPI. + type: text + id: msg_01MYDjkvBDRaKsY6PDwQz3n6 + model: claude-sonnet-4-20250514 + role: assistant + stop_reason: end_turn + stop_sequence: null + type: message + usage: + cache_creation: + ephemeral_1h_input_tokens: 0 + ephemeral_5m_input_tokens: 0 + cache_creation_input_tokens: 0 + cache_read_input_tokens: 0 + input_tokens: 2674 + output_tokens: 373 + server_tool_use: + web_search_requests: 0 + service_tier: standard + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '6733' + content-type: + - application/json + host: + - api.anthropic.com + method: POST + parsed_body: + max_tokens: 4096 + mcp_servers: + - name: deepwiki + type: url + url: https://mcp.deepwiki.com/mcp + messages: + - content: + - text: Can you tell me more about the pydantic/pydantic-ai repo? 
Keep your answer short + type: text + role: user + - content: + - signature: EqUDCkYICBgCKkCTiLjx5Rzw9zXo4pFDhFAc9Ci1R+d2fpkiqw7IPt1PgxBankr7bhRfh2iQOFEUy7sYVtsBxvnHW8zfBRxH1j6lEgySvdOyObrcFdJX3qkaDMAMCdLHIevZ/mSx/SIwi917U34N5jLQH1yMoCx/k72klLG5v42vcwUTG4ngKDI69Ddaf0eeDpgg3tL5FHfvKowCnslWg3Pd3ITe+TLlzu+OVZhRKU9SEwDJbjV7ZF954Ls6XExAfjdXhrhvXDB+hz6fZFPGFEfXV7jwElFT5HcGPWy84xvlwzbklZ2zH3XViik0B5dMErMAKs6IVwqXo3s+0p9xtX5gCBuvLkalET2upNsmdKGJv7WQWoaLch5N07uvSgWkO8AkGuVtBgqZH+uRGlPfYlnAgifNHu00GSAVK3beeyZfpnSQ6LQKcH+wVmrOi/3UvzA5f1LvsXG32gQKUCxztATnlBaI+7GMs1IAloaRHBndyRoe8Lwv79zZe9u9gnF9WCgK3yQsAR5hGZXlBKiIWfnRrXQ7QmA2hVO+mhEOCnz7OQkMIEUlfxgB + thinking: The user is asking about the pydantic/pydantic-ai repository and wants me to keep the answer short. I + should use the deepwiki tools to get information about this repository. Let me start by asking a general question + about what this repository is about. + type: thinking + - id: mcptoolu_01SAss3KEwASziHZoMR6HcZU + input: + question: What is pydantic-ai and what does this repository do? + repoName: pydantic/pydantic-ai + name: ask_question + server_name: deepwiki + type: mcp_tool_use + - content: + - citations: null + text: | + Pydantic AI is a Python agent framework designed to simplify the development of production-grade applications using Generative AI . The repository contains this framework, which emphasizes an ergonomic developer experience and type-safety, similar to Pydantic and FastAPI . + + ## Core Functionality + + The `pydantic-ai` repository provides a comprehensive set of tools and abstractions for building AI agents . Its core components include: + + * **Type-safe agents**: Agents are defined using `Agent[Deps, Output]` for compile-time validation . + * **Model-agnostic design**: It supports over 15 LLM providers through a unified `Model` interface . + * **Structured outputs**: Automatic Pydantic validation and self-correction ensure structured outputs . + * **Observability**: Integration with OpenTelemetry and Logfire provides comprehensive observability . + * **Production-ready tooling**: This includes an evaluation framework, durable execution, and protocol integrations . + + ## Repository Structure + + The `pydantic-ai` repository is organized as a UV workspace monorepo, containing several packages : + + * `pydantic-ai-slim`: This is the core framework, containing modules for `Agent`, `Model`, and tools . + * `pydantic-graph`: Provides the graph execution engine . + * `pydantic-evals`: Offers an evaluation framework for datasets and evaluators . + * `examples`: Contains example applications . + * `clai`: A CLI interface for the framework . + * `pydantic-ai`: A meta-package that includes all optional extras . + + ## Agent Execution + + The `Agent` class is the primary interface for orchestrating AI tasks . It coordinates the model abstraction, message protocol, tool system, and graph execution . Agent execution uses a graph-based state machine from `pydantic_graph.Graph` . The execution flow involves three core node types : + + * `UserPromptNode`: Processes user input and creates initial `ModelRequest` . + * `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries . + * `CallToolsNode`: Executes tool functions via `RunContext[Deps]` . + + The `Agent` class provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution needs . 
+ + ## Key Features + + ### Model Provider Support + Pydantic AI offers a unified `Model` abstract base class for various LLM providers, including OpenAI, Anthropic, Google, and more . All models implement `request(messages)` and `request_stream(messages)` methods . + + ### Tool System + The framework includes a tool system for registering and executing functions via decorators like `@agent.tool` . It automatically generates JSON schemas from function signatures and docstrings, validates tool call arguments, and injects `RunContext[Deps]` for dependencies . + + ### Observability + Pydantic AI integrates with OpenTelemetry, providing instrumentation for agent runs, model requests, and tool executions . It also has native integration with Pydantic Logfire for real-time debugging and performance monitoring . + + ### Evaluation Framework + The `pydantic-evals` package provides a framework for systematically testing and evaluating agent performance . It supports datasets with `Case` objects, various evaluators, and generates `EvaluationReport` with aggregated metrics . + + ## Integrations + Pydantic AI supports various integrations for development and production, including : + * **Model Context Protocol (MCP)**: For external tool server access . + * **AG-UI Protocol**: For interactive application frontends . + * **Agent2Agent (A2A)**: For multi-agent communication . + * **Temporal**: For durable workflow execution . + * **DBOS**: For database-backed execution . + + Notes: + The `pydantic-ai-slim` package is a core version of the framework, allowing users to install only necessary dependencies for specific models or features . The `pydantic-ai` package is a meta-package that includes all optional extras . + + Wiki pages you might want to explore: + - [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1) + + View this search on DeepWiki: https://deepwiki.com/search/what-is-pydanticai-and-what-do_2dc46d92-288e-470b-b52e-24bb13f839c7 + type: text + is_error: false + tool_use_id: mcptoolu_01SAss3KEwASziHZoMR6HcZU + type: mcp_tool_result + - text: |- + **Pydantic AI** is a Python agent framework for building production-grade applications with Generative AI. It provides: + + - **Type-safe agents** with compile-time validation using `Agent[Deps, Output]` + - **Model-agnostic design** supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) + - **Structured outputs** with automatic Pydantic validation and self-correction + - **Built-in observability** via OpenTelemetry and Logfire integration + - **Production tooling** including evaluation framework, durable execution, and tool system + + The repo is organized as a monorepo with core packages like `pydantic-ai-slim` (core framework), `pydantic-graph` (execution engine), and `pydantic-evals` (evaluation tools). It emphasizes developer ergonomics and type safety, similar to Pydantic and FastAPI. + type: text + role: assistant + - content: + - text: How about the pydantic repo in the same org? 
+ type: text + role: user + model: claude-sonnet-4-0 + stream: false + thinking: + budget_tokens: 3000 + type: enabled + uri: https://api.anthropic.com/v1/messages?beta=true + response: + headers: + connection: + - keep-alive + content-length: + - '6408' + content-type: + - application/json + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + content: + - signature: EtECCkYICBgCKkAkKy+K3Z/q4dGwZGr1MdsH8HLaULElUSaa/Y8A1L/Jp7y1AfJd1zrTL7Zfa2KoPr0HqO/AI/cJJreheuwcn/dWEgw0bPLie900a4h9wS0aDACnsdbr+adzpUyExiIwyuNjV82BVkK/kU+sMyrfbhgb6ob/DUgudJPaK5zR6cINAAGQnIy3iOXTwu3OUfPAKrgBzF9HD5HjiPSJdsxlkI0RA5Yjiol05/hR3fUB6WWrs0aouxIzlriJ6NzmzvqctkFJdRgAL9Mh06iK1A61PLyBWRdo1f5TBziFP1c6z7iQQzH9DdcaHvG8yLoaadbyTxMvTn2PtfEcSPjuZcLgv7QcF+HZXbDVjsHJW78OK2ta0M6/xuU1p4yG3qgoss3b0G6fAyvUVgVbb1wknkE/9W9gd2k/ZSh4P7F6AcvLTXQScTyMfWRtAWQqABgB + thinking: The user is asking about the pydantic repo in the same org, so that would be pydantic/pydantic. I should + ask about what this repository does and provide a short answer. + type: thinking + - id: mcptoolu_01A9RvAqDeoUnaMgQc6Nn75y + input: + question: What is Pydantic and what does this repository do? + repoName: pydantic/pydantic + name: ask_question + server_name: deepwiki + type: mcp_tool_use + - content: + - text: | + Pydantic is a Python library for data validation, parsing, and serialization using type hints . This repository, `pydantic/pydantic`, contains the source code for the Pydantic library itself, including its core validation logic, documentation, and continuous integration/continuous deployment (CI/CD) pipelines . + + ## What is Pydantic + + Pydantic is designed to ensure that data conforms to specified types and constraints at runtime . It leverages Python type hints to define data schemas and provides mechanisms for data conversion and validation . The library's core validation logic is implemented in Rust within a separate package called `pydantic-core`, which contributes to its performance . + + Pydantic offers several user-facing APIs for validation: + * `BaseModel`: Used for defining class-based models with fields, suitable for domain models, API schemas, and configuration . + * `TypeAdapter`: Provides a flexible way to validate and serialize arbitrary Python types, including primitive types and dataclasses . + * `@dataclass`: Enhances Python's built-in dataclasses with Pydantic's validation capabilities . + * `@validate_call`: Used for validating function arguments and return values . + + ## What this Repository Does + + The `pydantic/pydantic` repository serves as the development hub for the Pydantic library. Its primary functions include: + + ### Core Library Development + The repository contains the Python source code for the Pydantic library, including modules for `BaseModel` , `Field` definitions , configuration management , and type adapters . It also includes internal modules responsible for model construction and schema generation . + + ### Documentation + The repository hosts the documentation for Pydantic, which is built using MkDocs . The documentation covers installation instructions , core concepts like models , fields, and JSON Schema generation . It also includes information on contributing to the project . + + ### Continuous Integration and Deployment (CI/CD) + The repository utilizes GitHub Actions for its CI/CD pipeline . This pipeline includes: + * **Linting**: Checks code quality and style . 
+ * **Testing**: Runs a comprehensive test suite across multiple operating systems and Python versions . This includes memory profiling tests, Mypy plugin tests, and type-checking integration tests . + * **Coverage**: Aggregates test coverage data and posts comments to pull requests . + * **Release Process**: Automates publishing new versions to PyPI and sending release announcements . + * **Third-Party Integration Testing**: Tests Pydantic's compatibility with other popular libraries like FastAPI, SQLModel, and Beanie . + * **Dependency Management**: Uses `uv` for managing dependencies and includes workflows to check compatibility with various dependency versions . + * **Performance Benchmarking**: Utilizes CodSpeed to track and analyze performance . + + ## Versioning and Compatibility + Pydantic maintains strict version compatibility between the pure Python package (`pydantic`) and its Rust-based validation core (`pydantic-core`) . A `SystemError` is raised if there's a mismatch in `pydantic-core` versions, ensuring a stable environment . The `version_info()` function provides detailed version information for Pydantic and its dependencies . + + Notes: + The `CITATION.cff` file also provides a concise description of Pydantic as "the most widely used data validation library for Python" . The `README.md` and `docs/index.md` files reiterate this, emphasizing its speed and extensibility . + + Wiki pages you might want to explore: + - [Overview (pydantic/pydantic)](/wiki/pydantic/pydantic#1) + - [Development and Deployment (pydantic/pydantic)](/wiki/pydantic/pydantic#7) + + View this search on DeepWiki: https://deepwiki.com/search/what-is-pydantic-and-what-does_dab96efa-752a-4688-a630-3f4658084a88 + type: text + is_error: false + tool_use_id: mcptoolu_01A9RvAqDeoUnaMgQc6Nn75y + type: mcp_tool_result + - text: |- + **Pydantic** is Python's most widely used data validation library for parsing, validation, and serialization using type hints. The repository contains: + + **Core Features:** + - **Data validation** with automatic type conversion and constraint checking + - **Multiple APIs**: `BaseModel` for class-based models, `TypeAdapter` for arbitrary types, `@dataclass` decorator, and `@validate_call` for functions + - **High performance** via Rust-based validation core (`pydantic-core`) + - **JSON Schema generation** and comprehensive serialization support + + **Repository Contents:** + - Python source code for the main Pydantic library + - Comprehensive documentation built with MkDocs + - Extensive CI/CD pipeline with testing across multiple Python versions and OS + - Integration testing with popular libraries (FastAPI, SQLModel, etc.) + - Performance benchmarking and dependency compatibility checks + + Pydantic ensures runtime data integrity through type hints and is foundational to many Python frameworks, especially in web APIs and data processing applications. 
+ type: text + id: msg_01DSGib8F7nNoYprfYSGp1sd + model: claude-sonnet-4-20250514 + role: assistant + stop_reason: end_turn + stop_sequence: null + type: message + usage: + cache_creation: + ephemeral_1h_input_tokens: 0 + ephemeral_5m_input_tokens: 0 + cache_creation_input_tokens: 0 + cache_read_input_tokens: 0 + input_tokens: 5262 + output_tokens: 369 + server_tool_use: + web_search_requests: 0 + service_tier: standard + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/cassettes/test_anthropic/test_anthropic_mcp_servers_stream.yaml b/tests/models/cassettes/test_anthropic/test_anthropic_mcp_servers_stream.yaml new file mode 100644 index 0000000000..b742ded9a1 --- /dev/null +++ b/tests/models/cassettes/test_anthropic/test_anthropic_mcp_servers_stream.yaml @@ -0,0 +1,245 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '416' + content-type: + - application/json + host: + - api.anthropic.com + method: POST + parsed_body: + max_tokens: 4096 + mcp_servers: + - name: deepwiki + tool_configuration: + allowed_tools: + - ask_question + enabled: true + type: url + url: https://mcp.deepwiki.com/mcp + messages: + - content: + - text: Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short + type: text + role: user + model: claude-sonnet-4-5 + stream: true + thinking: + budget_tokens: 3000 + type: enabled + uri: https://api.anthropic.com/v1/messages?beta=true + response: + body: + string: |+ + event: message_start + data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01Xf6SmUVY1mDrSwFc5RsY3n","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":690,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":8,"service_tier":"standard"}} } + + event: content_block_start + data: {"type":"content_block_start","index":0,"content_block":{"type":"thinking","thinking":"","signature":""} } + + event: content_block_delta + data: {"type":"content_block_delta","index":0,"delta":{"type":"thinking_delta","thinking":"The user is asking about the p"} } + + event: ping + data: {"type": "ping"} + + event: content_block_delta + data: {"type":"content_block_delta","index":0,"delta":{"type":"thinking_delta","thinking":"ydantic/pydantic-ai"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":0,"delta":{"type":"thinking_delta","thinking":" repository. They want a short answer about"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":0,"delta":{"type":"thinking_delta","thinking":" the repo. 
I should use the deepwiki_ask_question"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":0,"delta":{"type":"thinking_delta","thinking":" function to get information about this repository."} } + + event: content_block_delta + data: {"type":"content_block_delta","index":0,"delta":{"type":"signature_delta","signature":"EuoCCkYICBgCKkDPqznnPHupi9rVXvaQQqrMprXof9wtQsCqw7Yw687UIk/FvF65omU22QO+CmIcYqTwhBfifPEp9A3/lM9C8cIcEgzGsjorcyNe2H0ZFf8aDCA4iLG6qgUL6fLhzCIwVWcg65CrvSFusXtMH18p+XiF+BUxT+rvnCFsnLbFsxtjGyKh1j4UW6V0Tk0O7+3sKtEBEzvxztXkMkeXkXRsQFJ00jTNhkUHu74sqnh6QxgV8wK2vlJRnBnes/oh7QdED0h/pZaUbxplYJiPFisWx/zTJQvOv29I46sM2CdY5ggGO1KWrEF/pognyod+jdCdb481XUET9T7nl/VMz/Og2QkyGf+5MvSecKQhujlS0VFhCgaYv68sl0Fv3hj2AkeE4vcYu3YdDaNDLXerbIaLCMkkn08NID/wKZTwtLSL+N6+kOi+4peGqXDNps8oa3mqIn7NAWFlwEUrFZd5kjtDkQ5dw/IYAQ=="} } + + event: content_block_stop + data: {"type":"content_block_stop","index":0 } + + event: content_block_start + data: {"type":"content_block_start","index":1,"content_block":{"type":"mcp_tool_use","id":"mcptoolu_01FZmJ5UspaX5BB9uU339UT1","name":"ask_question","input":{},"server_name":"deepwiki"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":""} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"{\"repoName\""} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":": \""} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"pydantic"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"/pydantic-ai"}} + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"\""} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":", \"question"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"\": \"What"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":" is "} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"this repo"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"sitory about"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"? 
Wha"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"t are i"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"ts main feat"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"ure"}} + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"s and purpo"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":1,"delta":{"type":"input_json_delta","partial_json":"se?\"}"} } + + event: content_block_stop + data: {"type":"content_block_stop","index":1 } + + event: content_block_start + data: {"type":"content_block_start","index":2,"content_block":{"type":"mcp_tool_result","tool_use_id":"mcptoolu_01FZmJ5UspaX5BB9uU339UT1","is_error":false,"content":[{"type":"text","text":"This repository, `pydantic/pydantic-ai`, is a GenAI Agent Framework that leverages Pydantic for building Generative AI applications. Its main purpose is to provide a unified and type-safe way to interact with various large language models (LLMs) from different providers, manage agent execution flows, and integrate with external tools and services. \n\n## Main Features and Purpose\n\nThe `pydantic-ai` repository offers several core features:\n\n### 1. Agent System\nThe `Agent` class serves as the main orchestrator for managing interactions with LLMs and executing tasks. Agents can be configured with generic types for dependency injection (`Agent[AgentDepsT, OutputDataT]`) and output validation, ensuring type safety throughout the application. \n\nAgents support various execution methods:\n* `agent.run()`: An asynchronous function that returns a completed `RunResult`. \n* `agent.run_sync()`: A synchronous function that internally calls `run()` to return a completed `RunResult`. \n* `agent.run_stream()`: An asynchronous context manager for streaming text and structured output. \n* `agent.run_stream_events()`: Returns an asynchronous iterable of `AgentStreamEvent`s and a final `AgentRunResultEvent`. \n* `agent.iter()`: A context manager that provides an asynchronous iterable over the nodes of the agent's underlying `Graph`, allowing for deeper control and insight into the execution flow. \n\n### 2. Model Integration\nThe framework provides a unified interface for integrating with various LLM providers, including OpenAI, Anthropic, Google, Groq, Cohere, Mistral, Bedrock, and HuggingFace. Each model integration follows a consistent settings pattern with provider-specific prefixes (e.g., `google_*`, `anthropic_*`). \n\nExamples of supported models and their capabilities include:\n* `GoogleModel`: Integrates with Google's Gemini API, supporting both Gemini API (`google-gla`) and Vertex AI (`google-vertex`) providers. It supports token counting, streaming, built-in tools like `WebSearchTool`, `UrlContextTool`, `CodeExecutionTool`, and native JSON schema output. \n* `AnthropicModel`: Uses Anthropic's beta API for advanced features like \"Thinking Blocks\" and built-in tools. \n* `GroqModel`: Offers high-speed inference and specialized reasoning support with configurable reasoning formats. \n* `MistralModel`: Supports customizable JSON schema prompting and thinking support. 
\n* `BedrockConverseModel`: Utilizes AWS Bedrock's Converse API for unified access to various foundation models like Claude, Titan, Llama, and Mistral. \n* `CohereModel`: Integrates with Cohere's v2 API for chat completions, including thinking support and tool calling. \n\nThe framework also supports multimodal inputs such as `AudioUrl`, `DocumentUrl`, `ImageUrl`, and `VideoUrl`, allowing agents to process and respond to diverse content types. \n\n### 3. Graph-based Execution\nPydantic AI uses `pydantic-graph` to manage the execution flow of agents, representing it as a finite state machine. The execution typically flows through `UserPromptNode` → `ModelRequestNode` → `CallToolsNode`. This allows for detailed tracking of message history and usage. \n\n### 4. Tool System\nFunction tools enable models to perform actions and retrieve additional information. Tools can be registered using decorators like `@agent.tool` (for tools needing `RunContext` access) or `@agent.tool_plain` (for tools without `RunContext` access). The framework also supports toolsets for managing collections of tools. \n\nTools can return various types of output, including anything Pydantic can serialize to JSON, as well as multimodal content like `AudioUrl`, `VideoUrl`, `ImageUrl`, or `DocumentUrl`. The `ToolReturn` object allows for separating the `return_value` (for the model), `content` (for additional context), and `metadata` (for application-specific use). \n\nBuilt-in tools like `UrlContextTool` allow agents to pull web content into their context. \n\n### 5. Output Handling\nThe framework supports various output types:\n* `TextOutput`: Plain text responses. \n* `ToolOutput`: Structured data via tool calls. \n* `NativeOutput`: Provider-specific structured output. \n* `PromptedOutput`: Prompt-based structured extraction. \n\n### 6. Durable Execution\nPydantic AI integrates with durable execution systems like DBOS and Temporal. This allows agents to maintain state and resume execution after failures or restarts, making them suitable for long-running or fault-tolerant applications. \n\n### 7. Multi-Agent Patterns and Integrations\nThe repository supports multi-agent applications and various integrations, including:\n* Pydantic Evals: For evaluating agent performance. \n* Pydantic Graph: The underlying graph execution engine. \n* Logfire: For debugging and monitoring. \n* Agent-User Interaction (AG-UI) and Agent2Agent (A2A): For facilitating interactions between agents and users, and between agents themselves. \n* Clai: A CLI tool. \n\n## Purpose\n\nThe overarching purpose of `pydantic-ai` is to simplify the development of robust and reliable Generative AI applications by providing a structured, type-safe, and extensible framework. It aims to abstract away the complexities of interacting with different LLM providers and managing agent workflows, allowing developers to focus on application logic. \n\nNotes:\nThe `CLAUDE.md` file provides guidance for Claude Code when working with the repository, outlining development commands and project architecture. The `mkdocs.yml` file defines the structure and content of the project's documentation, further detailing the features and organization of the repository. 
\n\nWiki pages you might want to explore:\n- [Google, Anthropic and Other Providers (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#3.3)\n\nView this search on DeepWiki: https://deepwiki.com/search/what-is-this-repository-about_5104a64d-2f5e-4461-80d8-eb0892242441\n"}]} } + + event: content_block_stop + data: {"type":"content_block_stop","index":2 } + + event: content_block_start + data: {"type":"content_block_start","index":3,"content_block":{"type":"text","text":""} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"**"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"Pydantic-AI"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"**"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" is"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" a framework for building"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" Generative AI applications with type"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" safety. It provides"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":":\n\n-"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" **"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"Unified L"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"LM interface"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"** - Works"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" with OpenAI, Anthropic, Google"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":", Groq, Cohere, Mistral, AWS"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" Bedrock, and more"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"\n- **Type-safe agents"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"** - Uses Pydantic for validation and type"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" checking throughout\n- **Tool integration** - Easily"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" add custom functions/tools agents can call\n- **Graph-"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"based execution** - Manages agent workflows as finite state machines\n- **Multiple"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" output formats** - Text, structured data, an"} } + + event: 
content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"d multimodal content\n- **Durable execution**"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" - Integration with systems like DBOS and Temporal for"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" fault tolerance\n- **Streaming support** - Stream"}} + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" responses in real-time\n\nIt's designe"} } + + event: ping + data: {"type": "ping"} + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":"d to simplify building robust, production-ready AI agents"} } + + event: content_block_delta + data: {"type":"content_block_delta","index":3,"delta":{"type":"text_delta","text":" while abstracting away provider-specific complexities."} } + + event: content_block_stop + data: {"type":"content_block_stop","index":3 } + + event: message_delta + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3042,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":354,"server_tool_use":{"web_search_requests":0}} } + + event: message_stop + data: {"type":"message_stop" } + + headers: + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool.yaml b/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool.yaml new file mode 100644 index 0000000000..992e1e7392 --- /dev/null +++ b/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool.yaml @@ -0,0 +1,375 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '514' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + include: + - reasoning.encrypted_content + input: + - content: Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short + role: user + instructions: You are a helpful assistant. 
+ model: o4-mini + stream: false + tool_choice: auto + tools: + - allowed_tools: + - ask_question + headers: + custom-header-key: custom-header-value + require_approval: never + server_description: DeepWiki MCP server + server_label: deepwiki + server_url: https://mcp.deepwiki.com/mcp + type: mcp + uri: https://api.openai.com/v1/responses + response: + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '11759' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '26025' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + background: false + billing: + payer: developer + created_at: 1761262977 + error: null + id: resp_0083938b3a28070e0068fabd81970881a0a1195f2cab45bd04 + incomplete_details: null + instructions: You are a helpful assistant. + max_output_tokens: null + max_tool_calls: null + metadata: {} + model: o4-mini-2025-04-16 + object: response + output: + - id: mcpl_0083938b3a28070e0068fabd81d51081a09d4b183ced693273 + server_label: deepwiki + tools: + - annotations: + read_only: false + description: Ask any question about a GitHub repository + input_schema: + $schema: http://json-schema.org/draft-07/schema# + additionalProperties: false + properties: + question: + description: The question to ask about the repository + type: string + repoName: + description: 'GitHub repository: owner/repo (e.g. "facebook/react")' + type: string + required: + - repoName + - question + type: object + name: ask_question + type: mcp_list_tools + - encrypted_content: gAAAAABo-r2bs6ChS2NtAXH6S8ZWRHzygQvAZrQGsb5ziJKg6dINF9TQnq4llBquiZh-3Ngx2Ha4S-2_TLSbgcsglradULI8c8N2CnilghcqlLE90MXgHWzGfMDbmnRVpTW9iJsOnBn4ferQtNLIsXzfGWq4Ov0Bbvlw_fCm9pQsqOavcJ5Kop2lJ9Xqb__boYMcBCPq3FcNlfC3aia2wZkacS4qKZGqytqQP13EX3q6LwFVnAMIFuwn5XLrh4lFf-S5u8UIw3C6wvVIXEUatY6-awgHHJKXxWUxqRQPJegatMb8KE-QtuKQUfdvEE0ykdHtWqT7nnC3qTY67UaSCCvJ9SdXj-t806GVei9McSUe8riU3viHnfY0R0u9GIXsVnfVthIDRnX7KzpF5ot_CpCrgbCmD9Rj2AAos5pCdSzpc08G5auUuuMZfoiWANADTHHhO2OvflSEpmO8pb-QAYfMoK9exYVQ8Oig-Nj35unupcYy7A2bDCViXzqy32aw9QHmH7rErI4v72beWQxRVdX15Z7VS2c6L1dD7cU18K35CWqlSz9hEX5AcGqEEtIDVu1TdF3m1m2u4ooc4TjYpRecjYoG8Ib-vVKoX5C65a7G1cTbCo8dO0DYKGgM8jM7ZDubxbCcZ22Sxk58f8cer7WxHyp7WRo5-6zvMwMCk8uEY44RJmg-m0Oxl_6qxdr4Md80xZah_6tCCB62agQmYwCrR75_r93xOckQAK0R_37khvQD5gWVlE5Rg-01eUTboiPGqYmIsqWvOkziMGnxgKVw_yUf8swHU1ciWr7O1EdVPHLG7YXlVQTHTE_CX3uOsE2FoZnpS_MgpxGfjb76majV50h7mJ6ySVPF_3NF3RQXx64W08SW4eVFD8JJf0yChqXDmlwu2CDZN1n99xdaE9QbMODNEOmfTQOPhQ9g-4LhstNTKCCxWDh0qiv_dq2qAd0I9Gupoit33xGpb66mndc0nuuNFe8-16iC_KzQtHBNzgasgYK-r83KFVmiYK3Jxvz_2dfdwe0M1q7NLBvbnWc6k9LIf8iDUF6Q1J-cfC7SsncCbROtzIPlKpQwxhP-M09Xy3RVxlH9dcvuk3_qqEAartUQC8ZbuLRbhiq66eE1RvQzdNd2tsoBQ85cdNs57Penio7w9zILUf1JP5O8-zCe5GPC3W3EXTIEvHR-kiuxJvhcsySijpldGmuygRx05ARNOIT7VDCZvF23RfmnRduY1X1FAqb_i_aMStK7iyHr_2ohwOWLuklpyuoG0Y1ulvq1A9-hyCZ0mpvTEF6om2tAZ9_7h8W9ksiOkey0yA-6ze17MCjfnK2XcbqmSMgOngW1PrD81oKoheMnIeJdcWgF2mk8VDqmAwaDTxMxdnXkzK74rA43a4rWk3d2bUts8dAUkuYXTwJwKQw4LfXtu-mwwgJ6BkT_GiBcBJ6ulBuPsNZfpwPuxox6PS6KpzVTQ94cKNqSIIyFCD4xZsEvPALud09-gmAEDHxdnPjqLSi2U8xd0j-6XYKN0JtZ45kwEIRsOrFu-SYLz1OcYFKI5A5P-vYlzGx1WhEnoeUlyooJBhNj6ZBfj9f63SByxm7sgh260vf1t-4OGzVTIUKFluxkI4ubigLZ-g4q4dSwiEWXn50JFPrtuPs5VxsIIz_lXbh1SrKeQ647KdDSAQZFgEfzOOt3el5K97V1x7V7gEWCCgmqDIz3yZPpwD6qmUQKqlj_p8-OQrniamGULkXrmrgbNQVfV-Qw7Hg6ELw4aHF_IZME9Qnyn7peFhH6ai_YapuNF7FK-MBtPYoMaqBf05U2-uJA
VUas3VuT_-pTyHvhtFmB7vc0-qgf_CtVNIXSPq2_vXdQdEwwCVPPwW6xWm-invrzhyQR_mf3OQqZT6_zOHIMPBJUaXcQKT0KTdoBZUDamAR-ECZl8r6wdLCn0HjAEwj3ifUCNMzQ7CZHUQG46rj61YyasNWO__4Ef4kTcApKgljosuABqP4HAdmkP5eEnX-6nutrL50iv-Mms_R-T7SKtmEEf9wihTu4Meb441cU9DI4WwSyiBSnsYdGy9FJKmHwP7HD0FmpmWkOrtROkQVMlMVKQFlKK8OBtxafHYsZkWDawbA1eetzMBzQ3PP8PSvva6SJWjbgURHVm5RjXV8Hk6toIBEDx9r9vAIczSp49eDCkQbzPkGAVilO3KLQpNx2itBbZzgE36uV0neZZsVs7aqafI4qCTQOLzYA8YFDKz92yhgdIzl5VPFLFNHqRS4duPRQImQ7vb6yKSxjDThiyQQUTPBX_EXUAAR7JHwJI1i8la3V + id: rs_0083938b3a28070e0068fabd84727c81a0a52c171d2568a947 + summary: [] + type: reasoning + - approval_request_id: null + arguments: '{"repoName":"pydantic/pydantic-ai","question":"Provide a brief summary of the repository, including purpose, + main features, and status."}' + error: null + id: mcp_0083938b3a28070e0068fabd88db5c81a08e56f163bbc6088b + name: ask_question + output: | + Pydantic AI is a Python agent framework designed to build production-grade applications using Generative AI, emphasizing an ergonomic developer experience and type-safety . It provides type-safe agents, a model-agnostic design supporting over 15 LLM providers, structured outputs with Pydantic validation, comprehensive observability, and production-ready tooling . The project is structured as a UV workspace monorepo, including core framework components, an evaluation system, a graph execution engine, examples, and a CLI tool . + + ## Purpose + + The primary purpose of Pydantic AI is to simplify the development of reliable AI applications by offering a robust framework that integrates type-safety and an intuitive developer experience . It aims to provide a unified approach to interacting with various LLM providers and managing complex agent workflows . + + ## Main Features + + ### Type-Safe Agents + Pydantic AI agents are generic `Agent[Deps, Output]` for compile-time validation, utilizing `RunContext[Deps]` for dependency injection and Pydantic `output_type` for output validation . This ensures that the inputs and outputs of agents are strictly typed and validated . + + ### Model-Agnostic Design + The framework supports over 15 LLM providers through a unified `Model` interface, allowing developers to switch between different models without significant code changes . Implementations for providers like OpenAI, Anthropic, and Google are available . + + ### Structured Outputs + Pydantic AI leverages Pydantic for automatic validation and self-correction of structured outputs from LLMs . This is crucial for ensuring data integrity and reliability in AI applications . + + ### Comprehensive Observability + The framework includes comprehensive observability features via OpenTelemetry and native Logfire integration . This allows for tracing agent runs, model requests, tool executions, and monitoring token usage and costs . + + ### Production-Ready Tooling + Pydantic AI offers an evaluation framework, durable execution capabilities, and protocol integrations . + * **Tool System**: Tools can be registered using the `@agent.tool` decorator, with automatic JSON schema generation from function signatures and docstrings . + * **Graph Execution**: The `pydantic_graph.Graph` module provides a graph-based state machine for orchestrating agent execution, using nodes like `UserPromptNode`, `ModelRequestNode`, and `CallToolsNode` . + * **Evaluation Framework**: The `pydantic-evals` package provides tools for creating datasets, running evaluators (e.g., `ExactMatch`, `LLMEvaluator`), and generating reports . 
+ * **Integrations**: It integrates with various protocols and environments, including Model Context Protocol (MCP) for external tool servers, AG-UI for interactive frontends, and Temporal/DBOS for durable execution . + + ## Status + The project is actively maintained and considered "Production/Stable" . It supports Python versions 3.10 through 3.13 . The documentation is built using MkDocs and includes API references and examples . + + ## Notes + The repository is organized as a monorepo using `uv` for package management . Key packages include `pydantic-ai-slim` (core framework), `pydantic-evals` (evaluation system), `pydantic-graph` (graph execution engine), `examples` (example applications), and `clai` (CLI tool) . + + Wiki pages you might want to explore: + - [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1) + + View this search on DeepWiki: https://deepwiki.com/search/provide-a-brief-summary-of-the_a5712f6e-e928-4886-bcea-b9b75761aac5 + server_label: deepwiki + status: completed + type: mcp_call + - encrypted_content: gAAAAABo-r2bD-v0Y3pAlyAEK1Sb8qJJcJRKSRtYwymHwLNXY-SKCqd_Q5RbN0DLCclspuPCAasGLm1WM1Q2Y_3szaEEr_OJalXTVEfRvhCJE1iTgoz2Uyf7KttZ4W92hlYjE8cjgdo5tKtSVkNyzTs4JUHKRHoDMutL2KivjZKuK_4n-lo9paJC_jmz6RWO8wUoXo3_fGxjliOGnWyRXwEPmgAcEWNOSVgCgAEO3vXerXRPLie02HegWcLMtK6WORDHd02Kr86QSK3W30bnvU7glAFX6VhSSnR8G0ceAM-ImoomQ8obEDyedX1-pYDKPOa4pZ5iTjD24ABYOwz-0L7SNziQJLycwwsr11Fj0_Au9yJph8YkNb2nAyFeiNVCRjKul51B7dZgz-UZ9juWO2ffeI0GNtQTYzf46_Y1t0qykGW6w59xjmBHTKf5SiSe0pqWxZ6LOLoPx01rX2gLaKgNZZiERSbO0iwbA4tpxb9ur-qeFVv5tS7xy8KFYOa8SPrypvFWDoY6CjSwTS3ir0vyfpbJy-n6bcYP_pTwDZxy_1aVkciim8Tmm_9wYgI0uY5kcA9VYJuyc4cg7S7ykTUxMZz7xiLMf8FoXl1gHbVJrYriyZzh2poYTWlcCuSCiUaXhQKxcxMRrt_P7WANx0n68ENQ40HkoJ6rThvWUuwtmEYqZ0ldh3XSFtyNrqha4PQ5eg_DudlU_5CxyykuzWmi_o5MEW4_XW4b9vdXg1laqx4189_jEuV_JPGNeL3Ke4EbMbKHzsiaGePRZGgNutnlERagmU4VFTeoE5bN3oHlR_Au4PeQxdb7BuBmZRDDCnnIRd2NfSWb7bgfUozkA4S6rm_089OlRBeRVoLtA8zZZinNGtOZl7MtkLnoJVIWpF1rr7D_47eWSyyegUIIS2e5UKLJfCLkNgSlWPU9VquHEzSfqeHfzoN5ccoVwrvrHmeveTjI-wIJygdfuyti5cMgOOkAtLzjWmbs4CjmlWcbZKeidtDj5YpCSmYAGFuZze-cSbNjMv4th639dCu_jmRMze-l2Y5npbRwMqEJr7VLXghmLc1vhOsaQM3gxoF0CJJlmvtR4jxPqhE3694YRva6LS1WjR4oueM6zfpVeB2kC0hQgqaL6MiwtTRYFfuCzEHi18TwA5bqqkfgrDXedmjAzlEGSZFe2EBRlF_ZtagrVVTCagHQArnH3DkVQMEDCHCqDxA_PINR_997IxeNgGPsvazVdOOBef7sO4rvAWrC94nIlt7d4aViqbTNMW-W8rqjGFOqj1swrM0yoX5y6LY5oXPc3Mu35xeitn_paqtGPkvuH6WeGzAiNZFDoQkUdLkZ4SIH2lr4ZXmMI3nuTzCrwyshwcEu-hhVtGAEQEqVrIn8J75IzYTs1UGLBvhmcpHxCfG04MFNoVf-EPI4SgjNEgV61861TYshxCRrydVhaJmbLqYh8yzLYBHK6oIymv-BrIJ0LX222LwoGbSc0gMTMaudtthlFXrHdnswKf81ubhF7viiD3Y= + id: rs_0083938b3a28070e0068fabd97008081a0ad1b2362bcb153c9 + summary: [] + type: reasoning + - content: + - annotations: [] + logprobs: [] + text: "Pydantic AI is a Python agent framework for building production-grade AI apps with:\n\n• Type-safe agents + (generic Agent[Deps, Output] + Pydantic validation) \n• Model-agnostic support (15+ LLM providers via a unified + Model interface) \n• Structured outputs (automatic Pydantic validation & self-correction) \n• Observability + (OpenTelemetry + Logfire tracing, token/cost monitoring) \n• Production tooling (decorator-driven tool system, + graph execution engine, evaluation framework, CLI) \n\nIt’s a UV-workspace monorepo (core packages, evals, graph + engine, examples, CLI), supports Python 3.10–3.13, and is actively maintained under an open-source license." 
+ type: output_text + id: msg_0083938b3a28070e0068fabd989bb481a08c61416ab343ef49 + role: assistant + status: completed + type: message + parallel_tool_calls: true + previous_response_id: null + prompt_cache_key: null + reasoning: + effort: medium + summary: null + safety_identifier: null + service_tier: default + status: completed + store: true + temperature: 1.0 + text: + format: + type: text + verbosity: medium + tool_choice: auto + tools: + - allowed_tools: + - ask_question + headers: + custom-header-key: + require_approval: never + server_description: DeepWiki MCP server + server_label: deepwiki + server_url: https://mcp.deepwiki.com/ + type: mcp + top_logprobs: 0 + top_p: 1.0 + truncation: disabled + usage: + input_tokens: 1207 + input_tokens_details: + cached_tokens: 0 + output_tokens: 535 + output_tokens_details: + reasoning_tokens: 320 + total_tokens: 1742 + user: null + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '5961' + content-type: + - application/json + cookie: + - __cf_bm=I1KszUjNwv2uNOtjsRRsldNvnVOw8h1u.wxWNcm3jQA-1761263003-1.0.1.1-akBJ4N038sDJQ5.bA.t0OOiJp.ZjUeBgcfh74f1WlJ4udDx4xKQgBLcuIt2baVr2HvEZeWvaVmBhENKyB0o_ICNY1qotoNSJ0NLlfi6aFmI; + _cfuvid=ZOdy7lYvFxE_5RaRILLwN_uBW1gic_STDgqG0ZvJuX0-1761263003633-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + include: + - reasoning.encrypted_content + input: + - content: Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short + role: user + - id: mcpl_0083938b3a28070e0068fabd81d51081a09d4b183ced693273 + server_label: deepwiki + tools: [] + type: mcp_list_tools + - encrypted_content: gAAAAABo-r2bs6ChS2NtAXH6S8ZWRHzygQvAZrQGsb5ziJKg6dINF9TQnq4llBquiZh-3Ngx2Ha4S-2_TLSbgcsglradULI8c8N2CnilghcqlLE90MXgHWzGfMDbmnRVpTW9iJsOnBn4ferQtNLIsXzfGWq4Ov0Bbvlw_fCm9pQsqOavcJ5Kop2lJ9Xqb__boYMcBCPq3FcNlfC3aia2wZkacS4qKZGqytqQP13EX3q6LwFVnAMIFuwn5XLrh4lFf-S5u8UIw3C6wvVIXEUatY6-awgHHJKXxWUxqRQPJegatMb8KE-QtuKQUfdvEE0ykdHtWqT7nnC3qTY67UaSCCvJ9SdXj-t806GVei9McSUe8riU3viHnfY0R0u9GIXsVnfVthIDRnX7KzpF5ot_CpCrgbCmD9Rj2AAos5pCdSzpc08G5auUuuMZfoiWANADTHHhO2OvflSEpmO8pb-QAYfMoK9exYVQ8Oig-Nj35unupcYy7A2bDCViXzqy32aw9QHmH7rErI4v72beWQxRVdX15Z7VS2c6L1dD7cU18K35CWqlSz9hEX5AcGqEEtIDVu1TdF3m1m2u4ooc4TjYpRecjYoG8Ib-vVKoX5C65a7G1cTbCo8dO0DYKGgM8jM7ZDubxbCcZ22Sxk58f8cer7WxHyp7WRo5-6zvMwMCk8uEY44RJmg-m0Oxl_6qxdr4Md80xZah_6tCCB62agQmYwCrR75_r93xOckQAK0R_37khvQD5gWVlE5Rg-01eUTboiPGqYmIsqWvOkziMGnxgKVw_yUf8swHU1ciWr7O1EdVPHLG7YXlVQTHTE_CX3uOsE2FoZnpS_MgpxGfjb76majV50h7mJ6ySVPF_3NF3RQXx64W08SW4eVFD8JJf0yChqXDmlwu2CDZN1n99xdaE9QbMODNEOmfTQOPhQ9g-4LhstNTKCCxWDh0qiv_dq2qAd0I9Gupoit33xGpb66mndc0nuuNFe8-16iC_KzQtHBNzgasgYK-r83KFVmiYK3Jxvz_2dfdwe0M1q7NLBvbnWc6k9LIf8iDUF6Q1J-cfC7SsncCbROtzIPlKpQwxhP-M09Xy3RVxlH9dcvuk3_qqEAartUQC8ZbuLRbhiq66eE1RvQzdNd2tsoBQ85cdNs57Penio7w9zILUf1JP5O8-zCe5GPC3W3EXTIEvHR-kiuxJvhcsySijpldGmuygRx05ARNOIT7VDCZvF23RfmnRduY1X1FAqb_i_aMStK7iyHr_2ohwOWLuklpyuoG0Y1ulvq1A9-hyCZ0mpvTEF6om2tAZ9_7h8W9ksiOkey0yA-6ze17MCjfnK2XcbqmSMgOngW1PrD81oKoheMnIeJdcWgF2mk8VDqmAwaDTxMxdnXkzK74rA43a4rWk3d2bUts8dAUkuYXTwJwKQw4LfXtu-mwwgJ6BkT_GiBcBJ6ulBuPsNZfpwPuxox6PS6KpzVTQ94cKNqSIIyFCD4xZsEvPALud09-gmAEDHxdnPjqLSi2U8xd0j-6XYKN0JtZ45kwEIRsOrFu-SYLz1OcYFKI5A5P-vYlzGx1WhEnoeUlyooJBhNj6ZBfj9f63SByxm7sgh260vf1t-4OGzVTIUKFluxkI4ubigLZ-g4q4dSwiEWXn50JFPrtuPs5VxsIIz_lXbh1SrKeQ647KdDSAQZFgEfzOOt3el5K97V1x7V7gEWCCgmqDIz3yZPpwD6qmUQKqlj_p8-OQrniamGULkXrmrgbNQVfV-Qw7Hg6ELw4aHF_IZME9Qnyn7peFhH6ai_YapuNF7FK
-MBtPYoMaqBf05U2-uJAVUas3VuT_-pTyHvhtFmB7vc0-qgf_CtVNIXSPq2_vXdQdEwwCVPPwW6xWm-invrzhyQR_mf3OQqZT6_zOHIMPBJUaXcQKT0KTdoBZUDamAR-ECZl8r6wdLCn0HjAEwj3ifUCNMzQ7CZHUQG46rj61YyasNWO__4Ef4kTcApKgljosuABqP4HAdmkP5eEnX-6nutrL50iv-Mms_R-T7SKtmEEf9wihTu4Meb441cU9DI4WwSyiBSnsYdGy9FJKmHwP7HD0FmpmWkOrtROkQVMlMVKQFlKK8OBtxafHYsZkWDawbA1eetzMBzQ3PP8PSvva6SJWjbgURHVm5RjXV8Hk6toIBEDx9r9vAIczSp49eDCkQbzPkGAVilO3KLQpNx2itBbZzgE36uV0neZZsVs7aqafI4qCTQOLzYA8YFDKz92yhgdIzl5VPFLFNHqRS4duPRQImQ7vb6yKSxjDThiyQQUTPBX_EXUAAR7JHwJI1i8la3V + id: rs_0083938b3a28070e0068fabd84727c81a0a52c171d2568a947 + summary: [] + type: reasoning + - arguments: '{"repoName":"pydantic/pydantic-ai","question":"Provide a brief summary of the repository, including purpose, + main features, and status."}' + error: null + id: mcp_0083938b3a28070e0068fabd88db5c81a08e56f163bbc6088b + name: ask_question + output: null + server_label: deepwiki + type: mcp_call + - encrypted_content: gAAAAABo-r2bD-v0Y3pAlyAEK1Sb8qJJcJRKSRtYwymHwLNXY-SKCqd_Q5RbN0DLCclspuPCAasGLm1WM1Q2Y_3szaEEr_OJalXTVEfRvhCJE1iTgoz2Uyf7KttZ4W92hlYjE8cjgdo5tKtSVkNyzTs4JUHKRHoDMutL2KivjZKuK_4n-lo9paJC_jmz6RWO8wUoXo3_fGxjliOGnWyRXwEPmgAcEWNOSVgCgAEO3vXerXRPLie02HegWcLMtK6WORDHd02Kr86QSK3W30bnvU7glAFX6VhSSnR8G0ceAM-ImoomQ8obEDyedX1-pYDKPOa4pZ5iTjD24ABYOwz-0L7SNziQJLycwwsr11Fj0_Au9yJph8YkNb2nAyFeiNVCRjKul51B7dZgz-UZ9juWO2ffeI0GNtQTYzf46_Y1t0qykGW6w59xjmBHTKf5SiSe0pqWxZ6LOLoPx01rX2gLaKgNZZiERSbO0iwbA4tpxb9ur-qeFVv5tS7xy8KFYOa8SPrypvFWDoY6CjSwTS3ir0vyfpbJy-n6bcYP_pTwDZxy_1aVkciim8Tmm_9wYgI0uY5kcA9VYJuyc4cg7S7ykTUxMZz7xiLMf8FoXl1gHbVJrYriyZzh2poYTWlcCuSCiUaXhQKxcxMRrt_P7WANx0n68ENQ40HkoJ6rThvWUuwtmEYqZ0ldh3XSFtyNrqha4PQ5eg_DudlU_5CxyykuzWmi_o5MEW4_XW4b9vdXg1laqx4189_jEuV_JPGNeL3Ke4EbMbKHzsiaGePRZGgNutnlERagmU4VFTeoE5bN3oHlR_Au4PeQxdb7BuBmZRDDCnnIRd2NfSWb7bgfUozkA4S6rm_089OlRBeRVoLtA8zZZinNGtOZl7MtkLnoJVIWpF1rr7D_47eWSyyegUIIS2e5UKLJfCLkNgSlWPU9VquHEzSfqeHfzoN5ccoVwrvrHmeveTjI-wIJygdfuyti5cMgOOkAtLzjWmbs4CjmlWcbZKeidtDj5YpCSmYAGFuZze-cSbNjMv4th639dCu_jmRMze-l2Y5npbRwMqEJr7VLXghmLc1vhOsaQM3gxoF0CJJlmvtR4jxPqhE3694YRva6LS1WjR4oueM6zfpVeB2kC0hQgqaL6MiwtTRYFfuCzEHi18TwA5bqqkfgrDXedmjAzlEGSZFe2EBRlF_ZtagrVVTCagHQArnH3DkVQMEDCHCqDxA_PINR_997IxeNgGPsvazVdOOBef7sO4rvAWrC94nIlt7d4aViqbTNMW-W8rqjGFOqj1swrM0yoX5y6LY5oXPc3Mu35xeitn_paqtGPkvuH6WeGzAiNZFDoQkUdLkZ4SIH2lr4ZXmMI3nuTzCrwyshwcEu-hhVtGAEQEqVrIn8J75IzYTs1UGLBvhmcpHxCfG04MFNoVf-EPI4SgjNEgV61861TYshxCRrydVhaJmbLqYh8yzLYBHK6oIymv-BrIJ0LX222LwoGbSc0gMTMaudtthlFXrHdnswKf81ubhF7viiD3Y= + id: rs_0083938b3a28070e0068fabd97008081a0ad1b2362bcb153c9 + summary: [] + type: reasoning + - content: + - annotations: [] + text: "Pydantic AI is a Python agent framework for building production-grade AI apps with:\n\n• Type-safe agents + (generic Agent[Deps, Output] + Pydantic validation) \n• Model-agnostic support (15+ LLM providers via a unified + Model interface) \n• Structured outputs (automatic Pydantic validation & self-correction) \n• Observability + (OpenTelemetry + Logfire tracing, token/cost monitoring) \n• Production tooling (decorator-driven tool system, + graph execution engine, evaluation framework, CLI) \n\nIt’s a UV-workspace monorepo (core packages, evals, graph + engine, examples, CLI), supports Python 3.10–3.13, and is actively maintained under an open-source license." + type: output_text + id: msg_0083938b3a28070e0068fabd989bb481a08c61416ab343ef49 + role: assistant + status: completed + type: message + - content: What packages does the repo contain? + role: user + instructions: You are a helpful assistant. 
+ model: o4-mini + stream: false + tool_choice: auto + tools: + - allowed_tools: + - ask_question + headers: + custom-header-key: custom-header-value + require_approval: never + server_description: DeepWiki MCP server + server_label: deepwiki + server_url: https://mcp.deepwiki.com/mcp + type: mcp + uri: https://api.openai.com/v1/responses + response: + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '5265' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '4861' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + background: false + billing: + payer: developer + created_at: 1761263005 + error: null + id: resp_0083938b3a28070e0068fabd9d414881a089cf24784f80e021 + incomplete_details: null + instructions: You are a helpful assistant. + max_output_tokens: null + max_tool_calls: null + metadata: {} + model: o4-mini-2025-04-16 + object: response + output: + - encrypted_content: gAAAAABo-r2izZacxe_jVh_p3URhewxBJuyLNqkJOd0owsDPt9uCE7MXn06WHhO_mp6gLDAqcF1uhMhXCqwztJ1Nbpc0cEDAxUpUCUn2bKSgG6r8Snc_FPtKGgQWDsByvW_Nigx55CyPuNeDO_MiDgYee_WeUw7ASLPfiGOx_9YNc_BFYo1ngsb8CKZcJn3AoponMheLoxkVAPgOjMgteRVaQTr13MljTDUlBIZLIOhVbtIu_dI23saXPigbgwR4RhGn5mCHG_a9ILNkXDJUmGy5TKklIEi2HuJM3ZJ3gfoGYS3OONvzmU4AgMP2UrU17YKZAYKxUBKSpyAqigd4RJSYWzxBCoYzCTmiITwdZ6Cpsw1X9Wox_TQSGt5G2Xu0UY2TQZGRNNH8knJpWs-UQxBBV4L3alMwJuIeV-uzqeKr5fKO5rL_c9as-qQIW_EGQItjvR5z80Hi-S9VXthWCmtqZFIJkgLB5JfTYuFL86valsFVLzSavUIWJAG5qOcxag2mbZMwMRRNfvR__BBtoqBoeGIqveQAbIeZbG0ymw30PH1a2v1mmSrpkK6PB3AHYRDdpkezXLkbyGYgidyV2DAAtPaFplsubWCh_74UxmOuk4BH-9cWkE15mRUBrvtnbTb793RsPzOe7nPmkMpdgqa3nqc6RcQZ_M30lFLUViAbfpEpMVrCzz2cv1RklT1JUzpuVXBTKqQ4FxVCfnvzSgQ2INQ8K50E1X5w_7TAWhrHbNg6LetCa-4KWe9ps0GH6r1x9FWvGyVxSwa7SIdPq3sGpxjOydluPECbBOnHWFUB-3rI2DcUl4rGWYbv2FEFNeCH9Zr67uUvMc4Doi8nVMoeb1lJxFCrfziGhbEXY0FepH3zIzlj-_dXqLAL1qqhfCznT_xkDMVYg-D5gMu-_p3r2SirjJbeaz5UFmP-Dihd9v7jWgD6hx_Mq1uIdzIPE8ImGiDPR7PK64svkvwYg1Czdrc_7GmrKRuzsBL0720UXe19NQqCZfYvUJAjgbEqr3tuS_RkhuEQeeVORn88xkhkrGCEgBS0LHFpe4tcnUEXKnaYYRnoYtk5xo4EyOGVKR2yhF9ht2zrMTo83YuRAPcNT38Jk4gMtVhBaJw_GOfee-IWN_F258rpmU4p8sRV-1iSuQI3Arm4JBU66QuyjoY-KJmTcE9ft3Bfm9If3yG5W0RFRJrsVb--GjHmiiXDGWiR5Q8L1of_RnSD5QDEbXXxhn4dsDejtCXUaQXE9Ty-NvkvA7G6Ru8cMvIKqP2fXS9SmiW6ePJ2Znrlyafxx6L58pT26RF42h90BVrSldf6SjxQApK3AKZW6q8AkuJnYWTtkR9-qfIDl7W94BsgOFoEd-SDQGxWzGJV9YqAu6_SQKiNDQoZZHrJkRSOPEW_b3-BAdrpwL700I92Rye4-BdhlgeK1RwhT3w1Z-z1tvGZXJtPwdpPa3iIw2TIlesMbC1ZJ22iT3CB_r0lnlZhMtIH6o50l50UGfSDuv8HZ_RNgGnYEPqP3FW-o_VD_Yu_KBqGSA0Eb5xAJjl0vpin2vFGO1P4RdgI17eZXRsCp1KvkpWjbEQTWAvJz39yr7wFQ4BrPfgxUqMP0-ZI_h1DkdPBzWs1uKqHw-4qC77sZXgxgHGEIU1tfKosTy_fK4c-WAbdqIHNTh9VdlM1EdrUJQ4rs2rsUG8o9WXwnGTFchI9Ao64LiCFTFTiFL_dvKI4ZraNNXXprfPhxsdLBaNfgj2CIfUwBMJ9xMGmHKQKLtwZdHpQNVqi8DNm1qjvs3CxbSXGKtkl5K8UhJtI1g4OnEnbq3jDO8DGIyDl0NH-0bcCDqS2yAkh8I3IobzxTg16mqU3roXLQ4pGXnWbx26A_9zb4Y1jV7rzCq24VIfNJzMUtW4fVMYzlrp3X1l32I5hF3YP-tU2paD98xobgc2Cn2RWXd3OirrdjKAE088KhXYLZZY59y4LYRLC6MDMHSX0cbEXbBvl6mKmbaFig2_7ICiSa7rR_Ij6PpQRxIW7NfS7ZMu5w7TnhLJyg5nuwMI8A5pVxfy3gYg2L60wepuX7UUV0USaHNKi8qxbp4RJj4nO-GdE8TbLJtvPw-OzrH9Qiv7iDHVMHOe1CDPLD5IeGqmVB0tuLqlyASuIe3oPxTU7QdctyxHa1z-sO8nN6kpPnzmVmS6XK8bY-h5do28dkZvefomSquXwKeiVg9VAMWVziKLPWWg5iWp2x-spLkWcQsQle2T7xizyETaF1t6YbecXtSoVFmu90_o6ns07etU3RVK1YpQLgqUIJwwF3ZwP65MaWPwqDuWCuoQErlApdhRptxId67KE3UC4j8cAaGSoG0kXnws-jzpPyAg1GU8c-Gu_K0F-h-KFbHPMiWCrrQqzVfvoA2wLaQz
3NPAqpq-kbFmrXRGkzLIeIvRVxck-sKkxQIcg3amSV5Dykl-lRCXGxlWNiFG_1SFrTSfp5VKyg7l1KjJzXUXHtqAErsPtMyhxaMmlh4An5a8NIaM9W6tafJrBXpUh85DfwZ8W92OAi1WOgoJIwWXSSeSuo6ECDstjVWW3OQQh9183jliwS7Bis3eu9jgAF3q8sYILBdwjrJRa6aAna2GirNwqZMEIg60kIlvmf1U6S2PgYaPm9UDzvMxjpzwjhXhzxHJitfU1tfl0vo-ATaTV8CxmKerNzy2AjlIZnjknG3xLyonCHbGbAe33QQTclb98y_vr5nA4WKlrls413o0a0f8GL8GjINCOd1RHVMjV + id: rs_0083938b3a28070e0068fabd9de42881a08fbb49a65d0f9b06 + summary: [] + type: reasoning + - content: + - annotations: [] + logprobs: [] + text: "The monorepo is organized into these main packages: \n\n• pydantic-ai-slim – core agent framework (type-safe + agents, model interface, tooling) \n• pydantic-evals  – evaluation system (datasets, metrics, evaluators, reports) + \ \n• pydantic-graph  – graph-based execution engine (state-machine orchestration) \n• clai       – CLI for scaffolding + and running agents \n• examples    – sample apps & demos showing real-world usage" + type: output_text + id: msg_0083938b3a28070e0068fabda04de881a089010e6710637ab3 + role: assistant + status: completed + type: message + parallel_tool_calls: true + previous_response_id: null + prompt_cache_key: null + reasoning: + effort: medium + summary: null + safety_identifier: null + service_tier: default + status: completed + store: true + temperature: 1.0 + text: + format: + type: text + verbosity: medium + tool_choice: auto + tools: + - allowed_tools: + - ask_question + headers: + custom-header-key: + require_approval: never + server_description: DeepWiki MCP server + server_label: deepwiki + server_url: https://mcp.deepwiki.com/ + type: mcp + top_logprobs: 0 + top_p: 1.0 + truncation: disabled + usage: + input_tokens: 1109 + input_tokens_details: + cached_tokens: 0 + output_tokens: 444 + output_tokens_details: + reasoning_tokens: 320 + total_tokens: 1553 + user: null + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool_stream.yaml b/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool_stream.yaml new file mode 100644 index 0000000000..2e6ade40d8 --- /dev/null +++ b/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool_stream.yaml @@ -0,0 +1,644 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '438' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + include: + - reasoning.encrypted_content + input: + - content: Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short + role: user + instructions: You are a helpful assistant. 
+ model: o4-mini + stream: true + tool_choice: auto + tools: + - allowed_tools: + - ask_question + - read_wiki_structure + require_approval: never + server_label: deepwiki + server_url: https://mcp.deepwiki.com/mcp + type: mcp + uri: https://api.openai.com/v1/responses + response: + body: + string: |+ + event: response.created + data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4","object":"response","created_at":1761255650,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":"You are a helpful assistant.","max_output_tokens":null,"max_tool_calls":null,"model":"o4-mini-2025-04-16","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"mcp","allowed_tools":["ask_question","read_wiki_structure"],"headers":null,"require_approval":"never","server_description":null,"server_label":"deepwiki","server_url":"https://mcp.deepwiki.com/"}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: {"type":"response.in_progress","sequence_number":1,"response":{"id":"resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4","object":"response","created_at":1761255650,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":"You are a helpful assistant.","max_output_tokens":null,"max_tool_calls":null,"model":"o4-mini-2025-04-16","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"mcp","allowed_tools":["ask_question","read_wiki_structure"],"headers":null,"require_approval":"never","server_description":null,"server_label":"deepwiki","server_url":"https://mcp.deepwiki.com/"}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":2,"output_index":0,"item":{"id":"mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd","type":"mcp_list_tools","server_label":"deepwiki","tools":[]}} + + event: response.mcp_list_tools.in_progress + data: {"type":"response.mcp_list_tools.in_progress","sequence_number":3,"output_index":0,"item_id":"mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd"} + + event: response.mcp_list_tools.completed + data: {"type":"response.mcp_list_tools.completed","sequence_number":4,"output_index":0,"item_id":"mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":5,"output_index":0,"item":{"id":"mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd","type":"mcp_list_tools","server_label":"deepwiki","tools":[{"annotations":{"read_only":false},"description":"Get a list of documentation topics for a GitHub repository","input_schema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. 
\"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"},"name":"read_wiki_structure"},{"annotations":{"read_only":false},"description":"Ask any question about a GitHub repository","input_schema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"},"question":{"type":"string","description":"The question to ask about the repository"}},"required":["repoName","question"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"},"name":"ask_question"}]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":6,"output_index":1,"item":{"id":"rs_00b9cc7a23d047270068faa0e4cd5c819f8855c183ff0fe957","type":"reasoning","encrypted_content":"gAAAAABo-qDkAWOlmTbSi3dHKOLXoeuhbAOwFJBolrA9MO7xNjYR1LR5Qsw5wETMdI3eEOjwKAblU7dCGv-TG39hSoYCiqdCqAfBbSQeULhcxxTQh94DdushltLeuG7UriT2i0XMt7yGvGcKQiIa_rKMsImTEda3cUTPUIeTLNiQiwk3-aYY10k1Eg8SW3otxXiUHCEWKz6agS-4rzN3-85uzNf_hYhBxlvC3n2qiivCGi897J0uM30FLuEj9Fowd-fXbiq9fQpX1sMh0xrpevVL5Y2H2nQj66bsaLrinRlKQMkPNkFjzDv4Yi9boKPLdNwz6V4ArQPa6jiGk7Acwe4csAplhhImFMNDS-HWBtb1MtYYUVb-zg1DmBUE3H6OHj-tGuM3Ktm4Vf7WrKxG1VsK7FpQAxYT5UGP96v1X7wevjovyJFtownlESVgnSyf9BFmLjYGrrSBpRonR59u5NMNJ5oUEF-2dM8eVSKGfu9-T-aiPcXMProBSoHkFUbNecvl2D071NSHKACighrUfEJUSyXxl0nhPfhuqS28ivSYDi29liIrorZ9OA1hqVCNR9ixkKxkb3EEMfP0S_hNgMFWNWPjw-EZ_gpBb4JIO02Dw8ITqE51GsG1JhB64p8jtd_ZINYo4Oi89OxPg8O9sOel6KdTJA02SN_NgkNH3SJ5Apd10_lwcht5FZbBr15QvhlWvXp9KN6iBYVbVQ8tCjvl1GhohFLTHvpC2Hhb2ABIDnFuolqhj77bCi1lqUWSKdyM8IGrU6bBs7TFAMgIZyoJIDahUjJ8Mbfgg0fvICzCqLtnF-sO-uKBZfsGEsAWTyyOgRY4x-og","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":7,"output_index":1,"item":{"id":"rs_00b9cc7a23d047270068faa0e4cd5c819f8855c183ff0fe957","type":"reasoning","encrypted_content":"gAAAAABo-qDma-ZMjX6meVDoCLYMqgkbQoEVzx_VFnmBFRLqsq37MiF7LP1HrMpqXqtrZ0R2Knb6lUiGSKhsOjOUAn9IFNUCuJx23cPLObF2CKt86wGLb7vccbCrp8bx-I6-kUtZASjlJx7_eJnvwyr24FLZlaDyGDuqRecGA8H4tXnQSAQTT9fJqy8h8dXvxvYzNj5rgOUWgRGn1NBph164KpiEzVWHADzZ_K0l4fX-DFHgtNFssPDYqOKLs_nU0XO8xaIZOgJ8QTf0XmHYF02GA_KciV6sIlSzVricQkwmu1XfJbjpME8XmRMIzlnLRqC8SAJs2kiaYnA8ObfI-s0RbRd3ztIUrzmAsdeo13ualD3tqC1w1_H6S5F47BB47IufTTbpwe_P6f5dLGpOzcrDPbtfHXv-aAW5YEsGyusXqxk51Wp7EONtADmPmVLJffFbRgnwfvPslbxxpNGfxNkN2pIs3U1FW7g1VvmxUfrF84LJpPKvs3xOaWXGorrPBY5nUyeRckhDFt6hGdS59VICmVy8lT4dL_LNswq7dVRS74HrrkfraXDDm2EhL2rtkwhiMqZtuYFsyIK2ys0lZuhNAkhtfgIoV8IwY6O4Y7iXbODxXUr48oZyvLdgV2J2TCcyqIbWClh3-q8MXMmP5wUJdrqajJ8lMVyhQt0UtMJKyk6EWY1DayGpSEW6t8vkqmuYdhyXQOstluONd31LqnEq58Sh8aHCzrypjcLfjDRo5Om1RlxIa-y8S-6rEIXahcJCX_juSg8uYHzDNJffYdBbcLSVQ5mAVl6OM9hE8gHs7SYqw-k-MCeoYsZwt3MqSV7piAu91SMZqB0gXrRDD67bdhmcLBYKmZYKNmLce60WkLH0eZMPSls-n2yyvmwflJA---IZQZOvYXpNUuS7FgMrh3c7n9oDVp15bUgJ8jDx6Mok4pq9E-MHxboblGUpMlFCJDH3NK_7_iHetcqC6Mp2Vc5KJ0OMpDFhCfT3Bvohsee5dUYZezxAkM67qg0BUFyQykulYLHoayemGxzi1YhiX1Of_PEfijmwV2qkUJodq5-LeBVIv8Nj0WgRO-1Y_QW3AWNfQ80Iy6AVa8j9YfsvQU1vwwE9qiAhzSIEeN1Pm2ub8PaRhVIFRgyMOLPVW7cDoNN8ibcOpX-k9p_SfKA9WSzSXuorAs80CTC9OwJibfcPzFVugnnBjBENExTQRfn4l7nWq-tUQNrT4UNGx-xdNeiSeEFCNZlH50Vr5dMaz5sjQQEw_lcTrvxKAV5Zs1mtDf6Kf29LkqhuUEdlMLEJwnAdz2IHLIy41zWLQctSnzBl9HB3mkw8eHZ1LdaRBQRFH4o7Rumhb3D1HdIqDLWeE3jkA6ZBAh2KadGx1u3AIIh4g3dHUS6UREkmzyRIuImbdTsoin1DrQbuYbaqZwIqU4TTIEmA8VeohMfff0rIL5yyFy7cfgGYurgAyMhARPGAAMAoTrR8ldWwymzPkGOJ_SQlzfNGV8weHOEYUl2BgQe57EDX4n1Uk294GIbvGR7eLRL_TLBUyHQErCaOCi8TkBNlLXIobw4ScN_jqqtURmC0mjRDVZeBi6hfrVShWChpQR8A
2HxxHrcuHi2hi_2akgUea3zz6_zbUYVoIRdOa9DvZuN015E8ZSL-v_1_vOzUGvt0MuWPazjiRDWgpgcISYzT8N-Xzu_EbwO1OsaOFIeUqrD8mZ6MKOuBQts68og0DWo8KQaHmCaWi4O-c8-5fbB2q3H6oiIoZtSJIoowAmFGOwyWxn_OPS9svDgEaeFYEYhXZ5wZDphxoHkjJ703opxrWoEfQw==","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":8,"output_index":2,"item":{"id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac","type":"mcp_call","status":"in_progress","approval_request_id":null,"arguments":"","error":null,"name":"ask_question","output":null,"server_label":"deepwiki"}} + + event: response.mcp_call.in_progress + data: {"type":"response.mcp_call.in_progress","sequence_number":9,"output_index":2,"item_id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac"} + + event: response.mcp_call_arguments.delta + data: {"type":"response.mcp_call_arguments.delta","sequence_number":10,"output_index":2,"item_id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac","delta":"{\"repoName\":\"pydantic/pydantic-ai\",\"question\":\"What is the pydantic/pydantic-ai repository about?\"}","obfuscation":"YW8dKIvvohK9z"} + + event: response.mcp_call_arguments.done + data: {"type":"response.mcp_call_arguments.done","sequence_number":11,"output_index":2,"item_id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac","arguments":"{\"repoName\":\"pydantic/pydantic-ai\",\"question\":\"What is the pydantic/pydantic-ai repository about?\"}"} + + event: response.mcp_call.completed + data: {"type":"response.mcp_call.completed","sequence_number":12,"output_index":2,"item_id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":13,"output_index":2,"item":{"id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac","type":"mcp_call","status":"completed","approval_request_id":null,"arguments":"{\"repoName\":\"pydantic/pydantic-ai\",\"question\":\"What is the pydantic/pydantic-ai repository about?\"}","error":null,"name":"ask_question","output":"The `pydantic/pydantic-ai` repository is a Python agent framework designed to simplify the development of production-grade applications using Generative AI . It aims to bring the ergonomic developer experience and type-safety philosophy of Pydantic and FastAPI to AI agent development .\n\n## Core Purpose and Features\n\nThe framework focuses on providing a robust and type-safe environment for building AI agents . 
Key features include:\n\n* **Type-safe Agents**: Agents are generic `Agent[Deps, Output]` for compile-time validation, leveraging Pydantic for output validation and dependency injection .\n* **Model-agnostic Design**: It supports over 15 LLM providers through a unified `Model` interface, allowing for easy switching between different models and providers .\n* **Structured Outputs**: Automatic Pydantic validation and reflection/self-correction ensure structured and reliable outputs from LLMs .\n* **Comprehensive Observability**: Integration with OpenTelemetry and native Logfire provides real-time debugging, performance monitoring, and cost tracking .\n* **Production-ready Tooling**: This includes an evaluation framework (`pydantic-evals`), durable execution capabilities, and various protocol integrations like MCP, A2A, and AG-UI .\n* **Graph Support**: It provides a way to define graphs using type hints for complex applications .\n\n## Framework Architecture\n\nThe framework is structured as a UV workspace monorepo, containing several packages .\n\n### Core Packages\n\n* `pydantic-ai-slim`: Contains the core framework components such as `Agent`, `Model`, and tools .\n* `pydantic-ai`: A meta-package that includes all optional extras .\n\n### Supporting Packages\n\n* `pydantic-graph`: Provides the graph execution engine with `Graph` and `BaseNode` .\n* `pydantic-evals`: An evaluation framework for datasets and evaluators .\n* `examples`: Contains example applications .\n* `clai`: Provides a CLI interface .\n\n## Agent Execution Flow\n\nThe `Agent` class serves as the primary orchestrator . Agent execution is graph-based, utilizing a state machine from `pydantic_graph.Graph` . The execution involves three core node types:\n\n* `UserPromptNode`: Processes user input and creates initial `ModelRequest` .\n* `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries .\n* `CallToolsNode`: Executes tool functions via `RunContext[Deps]` .\n\nThe `Agent` provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution scenarios .\n\n## Model Provider Support\n\nThe framework offers a unified `Model` abstract base class for various LLM providers . This includes native support for providers like OpenAI, Anthropic, Google, Groq, Mistral, Cohere, and Bedrock . Additionally, many OpenAI-compatible providers can be used with `OpenAIChatModel` .\n\n## Tool System\n\nTools are registered using the `@agent.tool` decorator . The system automatically generates JSON schemas from function signatures and docstrings, validates tool call arguments, and provides context injection via `RunContext[Deps]` .\n\n## Observability Integration\n\nPydantic AI integrates with OpenTelemetry, allowing for instrumentation of agent runs, model requests, and tool executions . It has native integration with Pydantic Logfire for enhanced monitoring and visualization .\n\n## Evaluation Framework\n\nThe `pydantic-evals` package provides a framework for systematically testing and evaluating AI systems . 
It supports defining datasets with `Case` objects and using various evaluators, including built-in and custom ones .\n\n## Integration Ecosystem\n\nPydantic AI supports various integrations for development and production:\n\n* **Model Context Protocol (MCP)**: For external tool server access .\n* **AG-UI Protocol**: For interactive application frontends .\n* **Agent2Agent (A2A)**: For multi-agent communication and workflows .\n* **Temporal**: For durable workflow execution .\n* **DBOS**: For database-backed execution and state persistence .\n\n## Notes\n\nThe `CLAUDE.md` file provides guidance for Claude Code when working with the repository, including development commands and an overview of core components and design patterns . The `mkdocs.yml` file defines the structure and content of the project's documentation, including navigation, plugins, and watch directories for various packages . The `docs/install.md` file details how to install the `pydantic-ai` package and its optional components, including a \"slim\" installation option for specific model dependencies .\n\nWiki pages you might want to explore:\n- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1)\n\nView this search on DeepWiki: https://deepwiki.com/search/what-is-the-pydanticpydanticai_e234e9cf-d4aa-4c67-a257-56034816dd56\n","server_label":"deepwiki"}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":14,"output_index":3,"item":{"id":"rs_00b9cc7a23d047270068faa0f4ff54819f9fb9ff25bebe7f5f","type":"reasoning","encrypted_content":"gAAAAABo-qD1miEnlv3KYY7Tnd-Zet3qT7T7XT3ICDPUOMFO1oxoegeysmmg644AReWW7dngeD_KVQU47kbVLfC8ML-PoUzyGyDJ18kpnXzusEc1fq4JuI-MeRF__9ca5ttvj5BnnEYygB-pE8ZVYRL6E2fHvS1xvLGkHLAneKaGct7b66NxcRs228MFuhEUyNZqX6pcatwlr_AUY_MerbJhYlr8mugq57Ubp-fFpPte56SrQJMEh6gwQwcJdPiMsMMhdBxlSy5pAusQEkes1ebhKfoSv6J9vZI3VDI30BXBCmHGcB_vlGXBBY4IAKWSOo8W4kq63Z1FCx1KuCigLl683O10-csG6FqacvZk7Gf0_4-LiWO8JC7jpjCAcCXYWltRa-aop30PNUYdpdcTKOc4HnEaDRcxu0-d6D98ogNLeXABDn2ZqZnxdTCciOXrh1Uj3IoLHCHXaeZvpOFFNzr1M6ipCMGTl8aOTGLrSvRq2aUP3TVE2V2ZDNLSSuf61QTP6TN_G6E1k9AijUPfXvzr1gMYjZsGzjTLYYrUWd_kjHwrU0IBg-lkEEr2uNaJfL6HQMkgwaRPEus5DpLEgGqZ79bLKzIh4x91nxAZt7RANgCYaMwNwsVkaRyKmmdekHUXMvUZ6KrN1tt1j_eQtTYIbbwcNisffm3LeaLP09O1mNwpNzMG9I7X3QXReiKJGuIB_dMLtqYhevinp0S1TUphXhmJjNs1RLIc7pYvuffKGp_y7PnBL_Muqso6ya7_6WcIFUEJYJMHrLAux_RDsEpwVW3UwB7dc051AOzs-6dyJvWJblsyIo-P2kycp5i8t6gabtfjXuWj","summary":[]}} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":15,"output_index":3,"item":{"id":"rs_00b9cc7a23d047270068faa0f4ff54819f9fb9ff25bebe7f5f","type":"reasoning","encrypted_content":"gAAAAABo-qD2WTMmhASwWVtFPlo7ILZP_OxHfRvHhda5gZeKL20cUyt0Np6wAHsJ6pyAsXCkLlKBVz3Vwm52JrJuUbqmw-zlXL19rbpvTPRMkiv_GdSfvmxKKNJvSm417OznBDVjsIAqmes2bMq03nRf6Pq2C0oUJnIbpbMwtWzs3jMQqUb0IwyopqXGhn3MWKctLPKZS89nyL4E9kJAx_TyWTQvME8bf8UrV8y2yrNz9odjSQQyZq5YXrlHzpOJjDTfLofVFjsEzM8J29SdLcWnqlv4djJ8xeMpP2ByXuHRnTEyNNuxpYJB7uQbYT0T_eLhwcLv2ZzDZ_hf2Msv7ZdyuPc7Yxc5YWlChB0iaHqQ_8UuMjIVurfgSIjSq2lTvJwdaA365-ZoBMpo4mG04jQDP3XM-0xEM6JTFWc4jZ1OjIXVpkjaXxdOOkYq3t3j8cqBQH69shFCEQr5tnM8jOEl3WHnkvaBg4xEMcd61hiLOKnWbQiYisbFucA8z5ZNbdohUZd-4ww0R8kSjIE5veiyT66gpIte0ItUnTyhIWy8SZYF9bnZGeS-2InDhv5UgjF2iXzgl6dmUrS-_ITgJkwu4Rdf9SBDJhji3_GUO9Za0sBKW8WohP142qY0Tbq4I6-7W1wJ3_gHJqiXVwDLcY90ODSyyC5_I3MgaALRC1wt55sHSeSsDjmNGmiH-m0snaqsI0JnAZwycnWCK17NamjQ9SxVM5tTqJgemkGFQNH1XhZPWvVj56mlj74KKbCJALQpdXD27C8LfdrlBd0v_zEmF1dh7e12I95fYeAlO51xOglBaMCgcMWSDHMGHsJBbJ04eVQSwYTl72rmkASTMaybD-aAm1m8qZnKU-f3xQradhs9l1x9eOfQDIsfWMr1aVMiZi59--VsrgYCbqBj7AGf8n6VNbQWkhO2etozwYZcdGIyiu4TaULX1Xp89Gb28M-tVkIrkQoHO_Z7wzKU1HRBViES1wRKUJ-Sa6wc8UP5orDxeOTFPUr7JL-qaj49cpKzvdlfuoIdbYwpsNvAg69sNbFI3w4jLxOT4yxS6thra1Bit6SY5wAEfrrjtzofLeg49aFqFVGIHeJ8kE3spc1rctpETkdHNyP9fEjZaM3mxR4yz0tPmEgUsd-sdw5BbOKDAVzwconmbeGBmf9KLXMEpRRH7-qSIWUscCi5qIdHXGYoQkStsNGrnhucn_hwqZCSti3Kbzfosud3zQPjW6NyuJCdeTxbDbsnrV7Lkge5j92pyxCHw9j0iuzofRW55_KToBtIvRoPr_37G_6d6TxK42mKqdbgk9GHrcXf27mXszCEzX-VfRVTxyc6JLfEy1iikdo-J2AzXPd4m3zE-zazBU3Z5ey596g8gxwXMkHakLrvwp4_-fQfcvs7sIH34xkEhz7BRdNok3Aqbu_zCt2np69jjHqfPQWZzAy1C-bmMuhAaItPYkkw-LgSu-YP6L89zNofK9Q_S3JwVsLN-fq-9OwhSjy_rQu22Gn4KD6saAu61QMXBPa6z0QJSFUZHJQ_megq1tENfB6wRVtQ0DdAvUwhUsMwx6yE9CT20bma4CloGW__aZuD9gikdQrQ1DCHOvTrfEpvHkl6-wuCImeNjsCvbRFAkx6Xgpc6fdbq4j6WyEVW_4VePNknFWYZ1cw795ka5uJMLc3hVughVlGwDbw60Q3utsjHPbu03pxPle5pdcVEYSQWa0WbFDCrF4ysK0lpmlF7","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":16,"output_index":4,"item":{"id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","type":"message","status":"in_progress","content":[],"role":"assistant"}} + + event: response.content_part.added + data: {"type":"response.content_part.added","sequence_number":17,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":""}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":18,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"p","logprobs":[],"obfuscation":"Vk0HCjd5u0Ukrjt"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":19,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"yd","logprobs":[],"obfuscation":"6SpGryhRaxuU0z"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":20,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"antic","logprobs":[],"obfuscation":"raUie3MMXRO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":21,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"SRoCsysoV5hjgF3"} + + 
event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":22,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ai","logprobs":[],"obfuscation":"6mlirEN7cMtwyk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":23,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" is","logprobs":[],"obfuscation":"LC0bR3L7jcGal"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":24,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" a","logprobs":[],"obfuscation":"yBhWnwngkzY3Pd"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":25,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Python","logprobs":[],"obfuscation":"dxoaw5ycm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":26,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" “","logprobs":[],"obfuscation":"K647qKBMWTs3R2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":27,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"agent","logprobs":[],"obfuscation":"mQ0BRipYOWP"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":28,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"”","logprobs":[],"obfuscation":"nQggGjehqaILYlW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":29,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" framework","logprobs":[],"obfuscation":"11aycr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":30,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" that","logprobs":[],"obfuscation":"kwpW4p1X9tA"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":31,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" brings","logprobs":[],"obfuscation":"xas9pHx9X"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":32,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" P","logprobs":[],"obfuscation":"qSIdJK5RW7zpvm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":33,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"yd","logprobs":[],"obfuscation":"Bz2DlYgkm1wWeT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":34,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"antic","logprobs":[],"obfuscation":"J7S5YzOMYV7"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":35,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"’s","logprobs":[],"obfuscation":"Z65SKcwCVrE1gN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":36,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" type","logprobs":[],"obfuscation":"rS2Kq1pJFD9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":37,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"duyIcQa0XlSj7Q3"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":38,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"s","logprobs":[],"obfuscation":"GWc25zWXEoq0Ksq"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":39,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"afety","logprobs":[],"obfuscation":"s6TWDxCGgEj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":40,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" and","logprobs":[],"obfuscation":"EW7Uj221LlU4"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":41,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Fast","logprobs":[],"obfuscation":"fwNlRqdQkXy"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":42,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"API","logprobs":[],"obfuscation":"eoR7MOQMsy8j3"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":43,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"5jGwL7h4O8pD6Fh"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":44,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"style","logprobs":[],"obfuscation":"DN8rxIsx3pO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":45,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" ergonom","logprobs":[],"obfuscation":"2Id0DfHE"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":46,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ics","logprobs":[],"obfuscation":"aoxJEuoBa2b40"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":47,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" to","logprobs":[],"obfuscation":"D5ZolLYrOJYKW"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":48,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" building","logprobs":[],"obfuscation":"qo3rGX1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":49,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" production","logprobs":[],"obfuscation":"tKqaM"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":50,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"9IndBLsG3x5kSCx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":51,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"grade","logprobs":[],"obfuscation":"nGJ5ZMlgy6c"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":52,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Gener","logprobs":[],"obfuscation":"gWRLEicOBO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":53,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ative","logprobs":[],"obfuscation":"wxOROxrGESD"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":54,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" AI","logprobs":[],"obfuscation":"JmbJrw6WvYPbF"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":55,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" apps","logprobs":[],"obfuscation":"kvO8ldQx5KJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":56,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":".","logprobs":[],"obfuscation":"EYjoFqIBF7NCzJf"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":57,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" It","logprobs":[],"obfuscation":"B5gEGEO7OTeFl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":58,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" provides","logprobs":[],"obfuscation":"im14xZ8"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":59,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":":\n\n","logprobs":[],"obfuscation":"Q5t4TUjPRWdGO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":60,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"•","logprobs":[],"obfuscation":"Al1wZ4EcCeieNq5"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":61,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" A","logprobs":[],"obfuscation":"yxqUA9VrQYo7wV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":62,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" generic","logprobs":[],"obfuscation":"PZ81xh6W"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":63,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Agent","logprobs":[],"obfuscation":"TpbTbShYtC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":64,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"[","logprobs":[],"obfuscation":"blAgJ2uz36NAI4W"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":65,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"Deps","logprobs":[],"obfuscation":"Wjp7lTFRZ04v"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":66,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"MgWqWRPZQLhpHEI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":67,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" ","logprobs":[],"obfuscation":"bRauUTk1Qo7zLpV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":68,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"Output","logprobs":[],"obfuscation":"PZIA5Qc97F"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":69,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"]","logprobs":[],"obfuscation":"MeYkpbC4yzNXwrI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":70,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" API","logprobs":[],"obfuscation":"6W2CWqJVuzHu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":71,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" with","logprobs":[],"obfuscation":"O7aeU39ccu1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":72,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" automatic","logprobs":[],"obfuscation":"x8Xp7l"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":73,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" P","logprobs":[],"obfuscation":"GhSptnlIYZf1n1"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":74,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"yd","logprobs":[],"obfuscation":"z5zORzwibyPUsU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":75,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"antic","logprobs":[],"obfuscation":"AjdBsN7aLZv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":76,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" validation","logprobs":[],"obfuscation":"O6cLx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":77,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" \n","logprobs":[],"obfuscation":"DldIbZeck1Qim"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":78,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"•","logprobs":[],"obfuscation":"hEEqhMuZjTsDQIS"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":79,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" A","logprobs":[],"obfuscation":"TiFRZM9aofCzTY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":80,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" unified","logprobs":[],"obfuscation":"ojoxjcAJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":81,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Model","logprobs":[],"obfuscation":"89sUQo1K2O"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":82,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" interface","logprobs":[],"obfuscation":"ul2588"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":83,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" supporting","logprobs":[],"obfuscation":"Jg7iG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":84,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" ","logprobs":[],"obfuscation":"ODfWoFvaTNXudsc"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":85,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"15","logprobs":[],"obfuscation":"zY9ZUUcQv6verB"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":86,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"+","logprobs":[],"obfuscation":"JDn5ILVBXAv404I"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":87,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" L","logprobs":[],"obfuscation":"xPidc8ZAMxMPSy"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":88,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"LM","logprobs":[],"obfuscation":"21Y3yDKtJzrMte"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":89,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" providers","logprobs":[],"obfuscation":"6ufTEC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":90,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" (","logprobs":[],"obfuscation":"MsE78OJOE4pKMo"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":91,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"Open","logprobs":[],"obfuscation":"vR0jr5cOYy7f"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":92,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"AI","logprobs":[],"obfuscation":"HrXXJBCsaMOOfy"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":93,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"KvtLACZg0ZYe1qW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":94,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Anth","logprobs":[],"obfuscation":"OCU6no50mSG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":95,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ropic","logprobs":[],"obfuscation":"7wQMMZZKQr7"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":96,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"UuhVFh8pc0fN7DC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":97,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Google","logprobs":[],"obfuscation":"9yPQBOqNs"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":98,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"5TsKkpsTiAt1NGv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":99,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" etc","logprobs":[],"obfuscation":"eBZrQh12Gksz"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":100,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":".)","logprobs":[],"obfuscation":"Nztw4EMo9Y1sT8"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":101,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" \n","logprobs":[],"obfuscation":"r7VgQ8ngLHoTI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":102,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"•","logprobs":[],"obfuscation":"fpL4pKxn1oC9vPh"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":103,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Structured","logprobs":[],"obfuscation":"MY0CI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":104,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" output","logprobs":[],"obfuscation":"ZaASMRPXe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":105,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" correction","logprobs":[],"obfuscation":"Wexsr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":106,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"rJwq5nDTnsFip2f"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":107,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" dependency","logprobs":[],"obfuscation":"fft0N"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":108,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"OOaHM38ijBwBYCt"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":109,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"in","logprobs":[],"obfuscation":"EPEETz1wMlLpFz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":110,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"jection","logprobs":[],"obfuscation":"1r6kMfvNY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":111,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" tools","logprobs":[],"obfuscation":"EWysz5tJ6M"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":112,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"IIMft7M8z7JvLAa"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":113,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" and","logprobs":[],"obfuscation":"jpoC9nnFKLWW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":114,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" graph","logprobs":[],"obfuscation":"lWIKoy6lhi"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":115,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"zt6eZptvrhzILbB"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":116,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"based","logprobs":[],"obfuscation":"TBoP64tFXUa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":117,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" execution","logprobs":[],"obfuscation":"wMh75h"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":118,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" \n","logprobs":[],"obfuscation":"vVH0pFi5zkpAn"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":119,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"•","logprobs":[],"obfuscation":"bu6fyXsjDNzLxoA"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":120,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Built","logprobs":[],"obfuscation":"RIPtMLTNXH"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":121,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"zh2kx4pTAZ6XWm9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":122,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"in","logprobs":[],"obfuscation":"ZTI0jz5VQlRIiF"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":123,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" observ","logprobs":[],"obfuscation":"McJaXhYhq"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":124,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ability","logprobs":[],"obfuscation":"AlUN7lSYb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":125,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" (","logprobs":[],"obfuscation":"k5eMyeaTipLmmQ"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":126,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"Open","logprobs":[],"obfuscation":"zdyarhHO8Lad"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":127,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"Telemetry","logprobs":[],"obfuscation":"BVliKci"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":128,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"WfNINHscLTtisIs"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":129,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" Log","logprobs":[],"obfuscation":"opPnJswz4zmY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":130,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"fire","logprobs":[],"obfuscation":"oGEav7Sc76QC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":131,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"),","logprobs":[],"obfuscation":"RcWchXNmkhoN2z"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":132,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" evaluation","logprobs":[],"obfuscation":"d9aYa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":133,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" (","logprobs":[],"obfuscation":"xp9lZxC2M7Oyds"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":134,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"p","logprobs":[],"obfuscation":"zqSJB4QkblpE93X"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":135,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"yd","logprobs":[],"obfuscation":"ILexU9cypuvBUG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":136,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"antic","logprobs":[],"obfuscation":"AdUc1DhC6RW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":137,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"Rm8lGWoIfrDBNgy"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":138,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"eval","logprobs":[],"obfuscation":"lUUtfgctfxko"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":139,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"s","logprobs":[],"obfuscation":"6XRBwbaoIx1jRS0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":140,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":")","logprobs":[],"obfuscation":"Acy2IsrwbG2T8NN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":141,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" and","logprobs":[],"obfuscation":"rEYH128M0Kn6"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":142,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" durable","logprobs":[],"obfuscation":"zGq60Z8t"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":143,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" workflows","logprobs":[],"obfuscation":"WRhvpy"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":144,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" \n","logprobs":[],"obfuscation":"BQl47q1cYlKY4"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":145,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"•","logprobs":[],"obfuscation":"gYOw04z0IgMxFxH"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":146,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" A","logprobs":[],"obfuscation":"WuLCIMqFi6OVMm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":147,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" mon","logprobs":[],"obfuscation":"yjlw33XQfHhT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":148,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ore","logprobs":[],"obfuscation":"HyEsv5mimIA1Z"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":149,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"po","logprobs":[],"obfuscation":"lm5vRlldQ6lZQi"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":150,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" split","logprobs":[],"obfuscation":"Yy3VwjQBBr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":151,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" into","logprobs":[],"obfuscation":"27TarmAx3Pi"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":152,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" core","logprobs":[],"obfuscation":"bIHC8U5u2QT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":153,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" (","logprobs":[],"obfuscation":"HXHXCpC8Mn4dkT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":154,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"p","logprobs":[],"obfuscation":"0S8wbZqXXKNopRK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":155,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"yd","logprobs":[],"obfuscation":"7M3l9MgnyTiajN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":156,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"antic","logprobs":[],"obfuscation":"SytueD9Usiw"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":157,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"afrAEsUNglokdoW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":158,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"ai","logprobs":[],"obfuscation":"6uoBBzPDae4wfk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":159,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"uxI9DPqv8MU8HX6"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":160,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"sl","logprobs":[],"obfuscation":"ZMxqWJdxwRI3di"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":161,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"im","logprobs":[],"obfuscation":"j2pzc2ge1Nc46y"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":162,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"),","logprobs":[],"obfuscation":"VglmfJ1eDh4Ef8"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":163,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" graph","logprobs":[],"obfuscation":"MMlPaFP3Nm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":164,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"5sh4qG9p0kheVrl"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":165,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" eval","logprobs":[],"obfuscation":"zON6cUUdXx7"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":166,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"s","logprobs":[],"obfuscation":"2OagaIH4Z4eTVGn"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":167,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"SAtcC8qdakYQYkw"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":168,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" CLI","logprobs":[],"obfuscation":"0s2JGMkXMK1l"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":169,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" and","logprobs":[],"obfuscation":"CymCLHx402em"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":170,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" example","logprobs":[],"obfuscation":"X5tQAOjW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":171,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" packages","logprobs":[],"obfuscation":"b20LEnM"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":172,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" \n\n","logprobs":[],"obfuscation":"9FsOCnqhXgfu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":173,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"In","logprobs":[],"obfuscation":"NO7bfpmf5fFH9V"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":174,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" short","logprobs":[],"obfuscation":"7nD6I4HNIx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":175,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"DgTnvSvq2DZmRb8"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":176,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" it","logprobs":[],"obfuscation":"iJ7wylz6lFyCW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":177,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"’s","logprobs":[],"obfuscation":"BDDgqILDjIWmJq"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":178,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" a","logprobs":[],"obfuscation":"FOH2iogfhVdbxu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":179,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" turnkey","logprobs":[],"obfuscation":"TAuKB7Vv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":180,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":",","logprobs":[],"obfuscation":"8vT4MvqENRQTfcu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":181,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" type","logprobs":[],"obfuscation":"eUsygt1b7HJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":182,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"-","logprobs":[],"obfuscation":"5ZaSjzjyI3ZITkz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":183,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":"safe","logprobs":[],"obfuscation":"ZME2KQ64ls9D"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":184,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" scaffold","logprobs":[],"obfuscation":"YwlsuaO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":185,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" for","logprobs":[],"obfuscation":"0epVQTynu8sV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":186,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" AI","logprobs":[],"obfuscation":"SOKdQJ4uX7NqZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":187,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" agent","logprobs":[],"obfuscation":"GPazeoLNzO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":188,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":" development","logprobs":[],"obfuscation":"lbIU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":189,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"delta":".","logprobs":[],"obfuscation":"53eDhc8T7GOaL0x"} + + event: response.output_text.done + data: {"type":"response.output_text.done","sequence_number":190,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"text":"pydantic-ai is a Python “agent” framework that brings Pydantic’s type-safety and FastAPI-style ergonomics to building production-grade Generative AI apps. 
It provides:\n\n• A generic Agent[Deps, Output] API with automatic Pydantic validation \n• A unified Model interface supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) \n• Structured output correction, dependency-injection tools, and graph-based execution \n• Built-in observability (OpenTelemetry, Logfire), evaluation (pydantic-evals) and durable workflows \n• A monorepo split into core (pydantic-ai-slim), graph, evals, CLI and example packages \n\nIn short, it’s a turnkey, type-safe scaffold for AI agent development.","logprobs":[]} + + event: response.content_part.done + data: {"type":"response.content_part.done","sequence_number":191,"item_id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","output_index":4,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":"pydantic-ai is a Python “agent” framework that brings Pydantic’s type-safety and FastAPI-style ergonomics to building production-grade Generative AI apps. It provides:\n\n• A generic Agent[Deps, Output] API with automatic Pydantic validation \n• A unified Model interface supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) \n• Structured output correction, dependency-injection tools, and graph-based execution \n• Built-in observability (OpenTelemetry, Logfire), evaluation (pydantic-evals) and durable workflows \n• A monorepo split into core (pydantic-ai-slim), graph, evals, CLI and example packages \n\nIn short, it’s a turnkey, type-safe scaffold for AI agent development."}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":192,"output_index":4,"item":{"id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"logprobs":[],"text":"pydantic-ai is a Python “agent” framework that brings Pydantic’s type-safety and FastAPI-style ergonomics to building production-grade Generative AI apps. It provides:\n\n• A generic Agent[Deps, Output] API with automatic Pydantic validation \n• A unified Model interface supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) \n• Structured output correction, dependency-injection tools, and graph-based execution \n• Built-in observability (OpenTelemetry, Logfire), evaluation (pydantic-evals) and durable workflows \n• A monorepo split into core (pydantic-ai-slim), graph, evals, CLI and example packages \n\nIn short, it’s a turnkey, type-safe scaffold for AI agent development."}],"role":"assistant"}} + + event: response.completed + data: {"type":"response.completed","sequence_number":193,"response":{"id":"resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4","object":"response","created_at":1761255650,"status":"completed","background":false,"error":null,"incomplete_details":null,"instructions":"You are a helpful assistant.","max_output_tokens":null,"max_tool_calls":null,"model":"o4-mini-2025-04-16","output":[{"id":"mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd","type":"mcp_list_tools","server_label":"deepwiki","tools":[{"annotations":{"read_only":false},"description":"Get a list of documentation topics for a GitHub repository","input_schema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. 
\"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"},"name":"read_wiki_structure"},{"annotations":{"read_only":false},"description":"Ask any question about a GitHub repository","input_schema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"},"question":{"type":"string","description":"The question to ask about the repository"}},"required":["repoName","question"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"},"name":"ask_question"}]},{"id":"rs_00b9cc7a23d047270068faa0e4cd5c819f8855c183ff0fe957","type":"reasoning","encrypted_content":"gAAAAABo-qD3VQyikAL6DCqJekitghySUvSx5cViETKf2rsb94u6S0mbFNnm2mh9aa32GgswtK2C_BqnLLm9L3ivBOFs2dz03CFqhhhZo7GexruY8g4kraqqLyzq-rc51XuGx_qDfkzSUWNXLsZH-DUT-m6kcFps7dBSfduvEmBl9rD0AdHQJ6qiXfFhH8ypvLQ6bzbG1YR5MYdRrp4Pq66_1TVGOIO5Vx41-bKqVcp48tbfR1MGY0HCJr_ILNrxLkhEbe6EeV571xXLLHxe1njl5rFrSY2uGZGJTPyQq3nHKKN3mtJCl98unOjp78G0k9QarFFMdtMVDPddKbn2UJYNdSWb6QummGHTGxRTnYGyuL5fWEr3pAi6KAvHG63rowNA8eyTiv4ESGCJjUUln0JG1WCg2Q_ReZffxGbWLTiTFN293m_hGHXvuBhdiquDkbE-WyDrkehT-CnEbIpykLzokMuN7ZYwtij1kM6YB-zOeJxD5HqmkccqNfq7DfgUMC1kwDwFfq0gfT2UE9Aaec6n7uPGYdhT1jaPXAn2LNAmLbxHcc6trmO7kxKYAZy0PWp_WnXRqvfyIwiULolp1XF83caKbK6Ly1Tv3dJ_m3I0GkT6ZfHnB7Xe198ie0T5Mxo31deY8CycgRhdBDbdOCVnzAVScT8LX5J9rGhtUOvj4J4yutrUHPWoxNn3nXTsOoRvWPI7QI4KmcYHdzPNxlVJFPpNqSdnR4DYZLnTjHbzWxXRDkTGuhKdfixrdpHZbZN1x4CIQNowvwT3uzzCGBknf12srpspSXXs6mi0H0EnEbuwiwGTbdSjaDgxPlVK7yCMDfOnqa2UJe0bIIX8na55o_Au5hxI5Bw323xIdYwkINGMfgy0VVViiGDuYyKSzYrpI1uOVZ7qCCM-Bxh9FwOBLhfa3JiswSfzRCwUYjHb5boDnOFI5d5B5219E0iVKLQ8rkkMNP5KrrTnLqvqJLzwMd588vz1WijLrruV9UIC3e361OitNpIinbTr0MjTV8Qo6Qv9pJ-1hTwiSXkgT-DVwrFSwKuVzCxlEZFEAEhsMXlb1Ea3j7lj-5_AofaInCM5NcYSm-tMRif5t9yYLRiEyb-VRRaLRmpQjyTNtMOZaf8pSYDKrfTTzrWEbA1rp1_btKBcS6pFKsZ7CQQ_nxiRTsjomVjLO7TrmoSu_A9zcxZQnb2aKaVMuavLosKlhxAofnr10AIAUU_to2SErDn23TsLgvpLQz0cjdlyDSf8ixcpaJio0ztgFBT0SJ2iOTT_iBJAeo7JMIFULG8ZFpmFB3EHJWaQEzyXlc_MNDK51pHP-c1K7wJmDnQQT4u8NWejHzGpA90l8TXN8Q8QmDmSRqok24w4PJfQQ8gq87hPDJkMJ-bK_5u-NGq46atVmOvT8G6Zu98wR4ilfjy6IoV1vN_tlvITbyG67ds9smj6qly45LJFZEQQXxltY5OBnxX0qR_7nHlyib8h9R_AdRyUmXVegPtJ-yKXAXYcl_TStDiY-ipyRas8qUR7Sf3yRnsGbsMYiZ5e6HKLWv3yVeBzBgszYTS5NS42VVQWRYlxWU33ufyu-XXJ9_POd-l3ypjguz5f2wcecsV-HumEOB07CowEw0wlvLnOzdJh76Ps1owfqO7KRBFUbU7XjyVDvgt8C_Rk-Ce3oWGhk_mA6n_pcNvTzNW8evx-2QzVIkiO5dVP2aJO1J1rngWPpw4G6PD2AA99wP32c5c8FYpjan7akGZgDCCSLXrdEKUtAZAZtn0BtGCR8zgokW6mx4_628FqyGDjGgsQSSACUBe0hHehlWop5Zx-nQ==","summary":[]},{"id":"mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac","type":"mcp_call","status":"completed","approval_request_id":null,"arguments":"{\"repoName\":\"pydantic/pydantic-ai\",\"question\":\"What is the pydantic/pydantic-ai repository about?\"}","error":null,"name":"ask_question","output":"The `pydantic/pydantic-ai` repository is a Python agent framework designed to simplify the development of production-grade applications using Generative AI . It aims to bring the ergonomic developer experience and type-safety philosophy of Pydantic and FastAPI to AI agent development .\n\n## Core Purpose and Features\n\nThe framework focuses on providing a robust and type-safe environment for building AI agents . 
Key features include:\n\n* **Type-safe Agents**: Agents are generic `Agent[Deps, Output]` for compile-time validation, leveraging Pydantic for output validation and dependency injection .\n* **Model-agnostic Design**: It supports over 15 LLM providers through a unified `Model` interface, allowing for easy switching between different models and providers .\n* **Structured Outputs**: Automatic Pydantic validation and reflection/self-correction ensure structured and reliable outputs from LLMs .\n* **Comprehensive Observability**: Integration with OpenTelemetry and native Logfire provides real-time debugging, performance monitoring, and cost tracking .\n* **Production-ready Tooling**: This includes an evaluation framework (`pydantic-evals`), durable execution capabilities, and various protocol integrations like MCP, A2A, and AG-UI .\n* **Graph Support**: It provides a way to define graphs using type hints for complex applications .\n\n## Framework Architecture\n\nThe framework is structured as a UV workspace monorepo, containing several packages .\n\n### Core Packages\n\n* `pydantic-ai-slim`: Contains the core framework components such as `Agent`, `Model`, and tools .\n* `pydantic-ai`: A meta-package that includes all optional extras .\n\n### Supporting Packages\n\n* `pydantic-graph`: Provides the graph execution engine with `Graph` and `BaseNode` .\n* `pydantic-evals`: An evaluation framework for datasets and evaluators .\n* `examples`: Contains example applications .\n* `clai`: Provides a CLI interface .\n\n## Agent Execution Flow\n\nThe `Agent` class serves as the primary orchestrator . Agent execution is graph-based, utilizing a state machine from `pydantic_graph.Graph` . The execution involves three core node types:\n\n* `UserPromptNode`: Processes user input and creates initial `ModelRequest` .\n* `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries .\n* `CallToolsNode`: Executes tool functions via `RunContext[Deps]` .\n\nThe `Agent` provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution scenarios .\n\n## Model Provider Support\n\nThe framework offers a unified `Model` abstract base class for various LLM providers . This includes native support for providers like OpenAI, Anthropic, Google, Groq, Mistral, Cohere, and Bedrock . Additionally, many OpenAI-compatible providers can be used with `OpenAIChatModel` .\n\n## Tool System\n\nTools are registered using the `@agent.tool` decorator . The system automatically generates JSON schemas from function signatures and docstrings, validates tool call arguments, and provides context injection via `RunContext[Deps]` .\n\n## Observability Integration\n\nPydantic AI integrates with OpenTelemetry, allowing for instrumentation of agent runs, model requests, and tool executions . It has native integration with Pydantic Logfire for enhanced monitoring and visualization .\n\n## Evaluation Framework\n\nThe `pydantic-evals` package provides a framework for systematically testing and evaluating AI systems . 
It supports defining datasets with `Case` objects and using various evaluators, including built-in and custom ones .\n\n## Integration Ecosystem\n\nPydantic AI supports various integrations for development and production:\n\n* **Model Context Protocol (MCP)**: For external tool server access .\n* **AG-UI Protocol**: For interactive application frontends .\n* **Agent2Agent (A2A)**: For multi-agent communication and workflows .\n* **Temporal**: For durable workflow execution .\n* **DBOS**: For database-backed execution and state persistence .\n\n## Notes\n\nThe `CLAUDE.md` file provides guidance for Claude Code when working with the repository, including development commands and an overview of core components and design patterns . The `mkdocs.yml` file defines the structure and content of the project's documentation, including navigation, plugins, and watch directories for various packages . The `docs/install.md` file details how to install the `pydantic-ai` package and its optional components, including a \"slim\" installation option for specific model dependencies .\n\nWiki pages you might want to explore:\n- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1)\n\nView this search on DeepWiki: https://deepwiki.com/search/what-is-the-pydanticpydanticai_e234e9cf-d4aa-4c67-a257-56034816dd56\n","server_label":"deepwiki"},{"id":"rs_00b9cc7a23d047270068faa0f4ff54819f9fb9ff25bebe7f5f","type":"reasoning","encrypted_content":"gAAAAABo-qD3K7EGpLRQ1wIzsTvIeVZdZJsH3vNs16W9DRG-xQgJDG2JMaCu4rFRnWLHsQbqYOhfrPdoWQ2rIcf4g9usyeouSK18fj1gYHZjfad18ere7MQynQXerSFlAqJ7U08_ZDtADGrMhLBBDnavF4yDQ_GpRCDoRDD9Z-0sXr3T57cIBheFHBw4nd6UGaI9zNSLD-rwbmITJp_C7bHeP3Ho0XSwnFd9ekQmulSQwJmLRmTGhrZPIWySG-K6VdzESg1rgz5EV82d3IbRnJkW5mcKNHJq8KtgLpWi9z1BnjEFBcPIL2liXOVG7_yLgFtZNqSMpj625xc0sfr7Le5sYjMsb5kVzinr6DLytefN-Pl7xfGIUIQDayhrwORd5opkSmLnVpFCpDcDWb2WP-Z2OPS7WOtwBli5autVtqztWoWX-dxPWE6ndrJlJGNXCaorvjJXZElp7ZpJHtRCnMfIpgZayU3-OVKJ4D-aqM98VwB-pXeotFf5-mpn93bwmdcM8R8c2WqcJXhnbM6m3v-bZGX_V_Io6nOEjBRWL3XzDJbsPlVTFgofTlsllJhpIDlxxPUPmvq9OchQyPekMYtnfVtXxVOzYZz2_DmM5VobsMoNNZo05LN69PHGzhC5ME9WZZDBzNFl_GMyOU35wTaUYNudwsUiqFiFy-yuuxxHDbrqxedEtlS-bkUVohcA3HM3Mkt5Nb8EzAWndFhZpzCDmADwcDtBYbu4NEUYkIolFoKRg2il_JrqXBard8KNrg_He1amZqDeq_zbFchrDOI2TR0WJf5kZGrPElKI8BV4A47MfuQpzlsn6-5I9UwF4lVW58Te_qEuBvqPhoenFzOVv6LK5WXjV2cSmzqXbfxRLOIad8VPNZxy-kFiyC1flIU_b19WRAh5LwE_far3TwMh23pyw5skqk6tkNCV41qpMxEqU1ZWT_rBG1GCbUsRvlnqmE_01XbHCnmWPtzd7n-oALTf2Ll3HbPis297MxSZLq8yMWPpkbnzbs65KZsu_8g-N5wpRUSzmim31KytQtohQzNDgUxKKnGxYplPkR02LTJzAwMg_0cDSrU6piF2BQUZirEo-mPR9g6IDGl421Ln6b0-q05-Ccd24BwTO1UJ-zgLHgTEanHRtl9dYmR6tguOxz1A_KcolnCKZq599US868kZrMkkR1X4BlrUEVIOzbLdThR8bzqagUy3u2LztGh58sNb4Bf4QsRrWKD42CIP3UoxDkG7mJec2T_jCfLslAQ9HkN_HWgJTbDPCeQHEqgVqJml_sYXQaf2n4bcYNtYAIBAAK-q1LXNOZDaqRPnI4HL29cAh5zIINZCX5E5O7xyzZC-IXoNE2BL5U_aqRUJ5yJscejcQUrOczgZLvEWzYuKBMvCRol-gNb2NdvOO_fEptU5CWOIod-9hvOJhlnNWX0G8VLjSU9ayQs8GfXn6b8QycmMbzrl-EclhzYm0cnLy757hKMiaUKG3ZTOHfZwj0DxiwaM6tTGo4hVEETZ6dYBeXPMii0seN0yHduNdoVKUiWz9sSM6MBzGhFQwjAWzUqQ-4E_islx_MRPcHYaiiKwWnCl5jMuJyBrE-lEi4YCSMn0cIAGQVFp35DvPlvVTKdhnEjGK0ixnXciba4v2L6ewTq3uwFlO_EKCQuz8XTHBOV32sRdJc7l0zD8BjnoZ1AE5tsX4Dz24xLd6stKMxoSsGiWoDx2UIog6WEMd5G_bPUz2P1t","summary":[]},{"id":"msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"logprobs":[],"text":"pydantic-ai is a Python “agent” framework that brings Pydantic’s type-safety and FastAPI-style ergonomics to building production-grade Generative AI apps. 
It provides:\n\n• A generic Agent[Deps, Output] API with automatic Pydantic validation \n• A unified Model interface supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) \n• Structured output correction, dependency-injection tools, and graph-based execution \n• Built-in observability (OpenTelemetry, Logfire), evaluation (pydantic-evals) and durable workflows \n• A monorepo split into core (pydantic-ai-slim), graph, evals, CLI and example packages \n\nIn short, it’s a turnkey, type-safe scaffold for AI agent development."}],"role":"assistant"}],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"default","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"mcp","allowed_tools":["ask_question","read_wiki_structure"],"headers":null,"require_approval":"never","server_description":null,"server_label":"deepwiki","server_url":"https://mcp.deepwiki.com/"}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":1401,"input_tokens_details":{"cached_tokens":0},"output_tokens":480,"output_tokens_details":{"reasoning_tokens":256},"total_tokens":1881},"user":null,"metadata":{}}} + + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '254' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool_with_connector.yaml b/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool_with_connector.yaml new file mode 100644 index 0000000000..ea82a7aba0 --- /dev/null +++ b/tests/models/cassettes/test_openai_responses/test_openai_responses_model_mcp_server_tool_with_connector.yaml @@ -0,0 +1,217 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '452' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + include: + - reasoning.encrypted_content + input: + - content: What do I have on my Google Calendar for today? + role: user + instructions: You are a helpful assistant. 
+ model: o4-mini + stream: false + tool_choice: auto + tools: + - allowed_tools: + - search_events + authorization: fake + connector_id: connector_googlecalendar + require_approval: never + server_description: Google Calendar + server_label: google_calendar + type: mcp + uri: https://api.openai.com/v1/responses + response: + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '12979' + content-type: + - application/json + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '11211' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + background: false + billing: + payer: developer + created_at: 1761255673 + error: null + id: resp_0558010cf1416a490068faa0f945bc81a0b6a6dfb7391030d5 + incomplete_details: null + instructions: You are a helpful assistant. + max_output_tokens: null + max_tool_calls: null + metadata: {} + model: o4-mini-2025-04-16 + object: response + output: + - id: mcpl_0558010cf1416a490068faa0f9679481a082dc4ac08889f104 + server_label: google_calendar + tools: + - annotations: + read_only: true + description: Look up Google Calendar events using various filters. + input_schema: + properties: + calendar_id: + anyOf: + - type: string + - type: 'null' + default: null + description: The ID of the calendar to search. Default one is 'primary' + title: Calendar Id + max_results: + default: 50 + title: Max Results + type: integer + next_page_token: + anyOf: + - type: string + - type: 'null' + default: null + title: Next Page Token + query: + anyOf: + - type: string + - type: 'null' + default: null + title: Query + time_max: + anyOf: + - type: string + - type: 'null' + default: null + description: Time in the ISO-8601 format. You can also use 'now' or leave null. + title: Time Max + time_min: + anyOf: + - type: string + - type: 'null' + default: null + description: Time in the ISO-8601 format. You can also use 'now' or leave null. + title: Time Min + timezone_str: + anyOf: + - type: string + - type: 'null' + default: null + description: Timezone of the event. 
Default is 'America/Los_Angeles' + title: Timezone Str + title: search_events_input + type: object + name: search_events + type: mcp_list_tools + - encrypted_content: gAAAAABo-qEE669V-_c3vkQAeRtSj9pi72OLJweRJe4IRZkLcFfnuwdxSeJM5DVDLzb3LbfzU0ee6a4KAae0XsETU3hELT1hn3LZPwfFku5zl7CVgsc1DmYBf41Qki1EPHFyIlMj937K8TbppAAqMknfLHHwV1FLb8TapccSEhJbzGutqD3c2519P9f6XHKcuDa8d-sjyUejF0QuSjINFcjifJ8DiU40cL_-K6OJotlx6e0FqOivz6Nlj13QZxQ0I3FiiSi03mYKy240jYMpOpjXr7yPmEXLdCJdP5ycmTiJLxf4Bugww6u4F2uxy22978ACyFGSLHBiQyjczj_can7qKXAkMwYJKcGNjaNi8jG5iTIwsGswRjD1hvY-AGUotMFbPCszX3HW1M_ar-livaheiZauCfKV-Uc1ZeI3gijWEwtWQ0jye29FyQPCCpOBvT6RbUvFEpfqpwcMQuUhOyEfgzli2dpuOAgkSjCPE6ctoxjbYa62YzE-yrXAGc5_ptQy_2vw7t0k3jUzSo2Tv0aKnqvvKcj9SIilkZV4Nf-TL_d2E7d48bBJDlqbAv7fkhhd2YlkLqwdR1MqZtygcR1Jh8p2Y1pFAa4mSj7hh4M-zfSu--6dij2iKIbnKQ4DbXyGpMZXBAqTHMe9PPOwGxWKShlN5a5T89B04d_GwJYBDJx2ctecqZxDMjkTn3wVGl_5wuDnrEgd0I91vmAoYuWldR_h8M_FjDFiHefdbZjw1TxVKjkp6wk6zQiXCvvCZYJa9XkhytcllWvUI4C0gbxHrEzZRy9Vii3buqnbiIM9Qj0VPx-Q-FKM_usZBBmlvmk9PMQ8rH9vVT8dRFNQEj-aqudB5yUcTx8XaUFwYAts04OObGBqXoazYtxh6WvHwrf09pb_g0dwzE_rlcQdYxcFLOpYD-AentRAjOuIr4bLRM9BMERBxPvvPCxZ2Mva8YqV2TIOtxzMY08freim6du1IuYprO6CoejPaBdULhct-nsPubOdjLBikZt_bwumvmqGXnxI_uu51b9HtzPeDpWIjF6pi88bcsOk0qglA9GAu3wwX-iIdaV19VdVCO4KJjxiVrbTY1IVgWSdz98Alb_HzpXsoS6i2PRAjjsYOe4RBX3etxjsY07XXLlmXAM_vuYXc8Y6STxvBk4ST4OkaCvUk9DoZbVL5KmVcT6TaFpbVCOB_eHkHIvMjXc35kzxCdqEMG3FpRzL_UkY8pPridvq2z1Xw0al2KEBvdKPlInB8-zX5ANGeRkMGZ6ZfyX1zCIdYLe3wrC8xqr5nUZ-ueWmtqYLavSg8mQKphp4QyVaiwtbxEt5GEiVG7_LR754mGQYPdr9Shh3ECAp8wmSfDVO8MHaLmzgo3RXeqlqFldRjQzDHtCaGhjD9bHKF3yWF2LtH4gUN-Sf--86lcq7iwHDSDm656P_FBfYmE7rA0svH-m3hQoBhza4CKJ7s7f7ZymEhcHAfH7SPImZ3Y-kT_Sy1mbCCf3Yg8uitrpX7ukO6_bIANS_R4oiOPcuLixbWY0ZSyq8ERB5fa5EsIUm7PpGxbO96nmk5rPkewyB4gCtslwJI0Ye7zHtqrDBz1j1nsjIKsRCfFWlUdRF8J1JPiiBSvP8SraQ_94cnKBCsl34BGsVm-R1_ULbuyahBzSHq2Kwr0XQuNLdGChyLKS_FZVT58kbRFsvjZnbalAZ-k9alMeZ-pdWX5f9nSn3w7fz675zOxnBaqiZmoWHXFNOBVGH7gkz05ynJ2B8j_RpdRNJKXUN8pAvf595HGl2IPdaDhqoeS2_3jixO5mmxZuPEdzopoBFRarWud99mxH-mYxWJzKiA1pLNqj7SO93p2-jB-jtsCfZfk6bVEWpRRkIEz0XvxffFTVuGUCqpGS7FiFZc4pQU24pCrdpg2w3xeDSrmfHDAx2vUvv0iRBnQxTTWx2-de2TQQTpR5tjFNyOhYGVn1OXqkbkNtIUHdnNGA1QBCU0Qs0471Ss1CrxXIeeNVSTd00jiu4_ELk6nJYgSpmS8G_crrDza8mRLV5Yk0ItRrZj6pwKUOEaYeyM-RHyhrjf09yaf7Qc3sAozQF0aXFCQjSYiVb98DuGH28HLUxW9ulmSKKR4pYKlCOLNGm0h_gWCpSa0H1HXCgEoPn68HyaJogv_xH3k4ERYyJnxu8zVbVPMGoa9q9nNRQQ9Ks2AvxYRQeGFSCTACBmuookvHsO1zjYfHNuSCD7pCLRFE76KlmSiAX6l9LNOq_xe9Oos-1AvcZHkmVsuh-mjTVkBOjG6zmnHiNJirBpORs_UWL5lmlQBeaXgdHxcb4tHIn8XYXFkQiC4b4pw== + id: rs_0558010cf1416a490068faa0fb684081a0a0b70f55d8194bb5 + summary: [] + type: reasoning + - approval_request_id: null + arguments: '{"time_min":"2025-10-23T00:00:00","time_max":"2025-10-23T23:59:59","timezone_str":"America/Los_Angeles","max_results":50,"query":null,"calendar_id":"primary","next_page_token":null}' + error: + code: 500 + message: An unknown error occurred while executing the tool. 
+ type: http_error + id: mcp_0558010cf1416a490068faa0fdf64481a085a3e5b8f7d6559a + name: search_events + output: null + server_label: google_calendar + status: failed + type: mcp_call + - encrypted_content: gAAAAABo-qEE72KCH4RlulMdH6cOTaOQwFy4of4dPd8YlZ-zF9MIsPbumWO2qYlZdGjIIXDJTrlRh_5FJv2LtTmMbdbbECA20AzFMwE4pfNd2aNLC5RhcHKa4M9acC1wYKAddqEOPP7ETVNBj-GMx-tMT_CY8XnBLWvSwPpcfde9E--kSrfsgvRn1umqDsao4sLlAtV-9Gc6hmW1P9CSJDQbHWkdTKMV-cjQ-wZHFCly5kSdIW4OKluFuFRPkrXs7kVmlGnMr8-Q5Zuu1ZOFR9mPvpu2JdxAFohjioM-ftjeBuBWVJvOrIF4nV-yIVHVT-_psAZaPUUB5cyPAtqpoxxIV3iPKPU8DHctP03g_0R6pSWWHhggvO5PBw3zyPwtBwOrHBipc4nQEWEMxZxLH5SYJauTKwHNOx9NyCq8JUjZXM_v4xsGxNa4cAp7GuXqR2YyW2sx7syRUiDwtebh0xk_YOQtkv8tAjzCofmaz3n8FJ2nGSXkilaV5Q8LUNO-9-D2tsAaScDVMuLMMAHFNp_GPplWrmGES4mTCNtTXWyF1GLcQBw8dYYctV66Ocy2_zxyDoB7SsR5htlV77nJ6u1Hbp3tk26LutDrhAhe55xcki8iblHbXNY9MRzR1SS5Zk3-dv0ex4QOzC663NvS9aK3olQbKYko5TvM7Pq4MFYfaxwFTVFVEdaskoDJieVyikz0ZzBjTsItIwL-Q2BVN2F_P_wgCV5hyDclNMPEGTMxajxfIFv-oEunmHY1_RJavl47iXWS8H3JWAvp-9YYQdTS4Aa6m5zPndvHOvEV355UawLHRPctHFUS7rE7rYmcU6KQaqC96JRM0KRfXNIgYtNfw6cxgnyqGxzTF7qeeVzObOqoQmz59Rh0U9ti37vqHb8Ca43-q2Gx2KaVZFj7MBQK8UodfaDRIEuyMB3XNfckxCefwHs7FeAj5NuNDBrm0uDcwJjs2JfY2i54gAES8kAPLGJgRpq_qdjVXqpO6W0H9E1vBdRem7zLPYbA8OOo-KCkRW4AFCVbgCpgIvo4GDNvFOMksl-d8zgQU2qroUWJRu58j1bdaar7Zlfxk0UR33nROmJpXGb_R-RCNAN1ZxJTdEU_dVfyLCeuIXPsnO-FlfO8J6Un3WWPNLuN_bDS5RocniI_ms71qLsisJQiPTs-JDFl-eMM2Hk3QqSCC6OT0CLG9XMmI_zva9yp2joQ8HdGMddE3FDCbLejRrx8fV-9Nd0tZ7SYjFG78_fre8IfL0L67CK1JIPYzhgRZgCb-FFwUy-stR_BstIn0sRr_tDCoHdxuoVCh0dZfTY1p27xbKQ50svHxp1caNp3uze0wLXP9STNouFjFpdIHMsDRaGfO9R9mMmUsFcmBMK3aikuHTpebyL1CeZsIzH2cbZLPRx3pN2IqJ-5h6-cORHuMqf3ysEEFCjXnqmzvWPuBjYDsxnxA1awaGkYKsKhqchgakrfplOjdG5tSkklggBJA93iRaUWIR-4oV6HkkrnpdK1w7BL_VT8upqZmkpHZtZCDSgINk5S5hoYPLBTtS3dcCmQIbLvPXPuGzdAZxl0bhD4Rm3GPDFszaDoFK0Jszcjlaf4SJqyZABKEf71dDbi1as-2Qwr4fxBiQIOsF8ChbYo6Z2iFtUpBnbruFUIwB5QyKfWnwEZbOgf4UbIvIqNMkTzMc8tJgz6Ddqfih8VeNH3v8_84J6vHU0SVm_gvkgQ6P6N_6r5LwNdlAEff0hFwn-aTHWZ3s8MICckUZj97lKoZxAl91WlsKa0yrLw24dxvJ6bhZf0FsOitUJGd7vFPx0TxSobUkzE2RrbQ3hziPxw2Gins4aI6YG3M1gfumd3MgdH-fYBvZulJ9vmw0ZC1Dqh6BkCWHOFKsnpQvHmYuyTzUmnYuJf8N5j_b9XNw0krmxouOCPQClFmIOBLw8XPbe3xf0F5JP7BC0PpjlPT33A5Z6Za5zlA5O-DE_Wp0WG885-GaKtZI-zBZW3R0lc9A4s0HbxqA3lqH8leXOCe6WO46Z_iTQlALpTR-7oaHqzTegq0KSmEjCFO-jLSrVZnBOQ4ddTvLj4ASsQbj-o6TFUFVZAKSLI3FtWovHw02Gc_D0luFz9TbfaXM-EapEQYajkG0_b_nSCoPq0T9HSyvU4oCxXyQvhwIgzbijR-BheN6a_l6hiqZCw9L1c8MdPRtjpbHtEwWkpQ62s8XdydeJnV5vJYp9ezBbS_vWQ7Nz1siai6epJTdzDkRm-dudVhKzdohwg-FOQ-5gSrvoPS_MF4lZvah3iXY1g4uePO4eNDWGJ74YPybiy + id: rs_0558010cf1416a490068faa0ff5c9081a0b156a84d46e5d787 + summary: [] + type: reasoning + - approval_request_id: null + arguments: '{"time_min":"2025-10-23T00:00:00Z","time_max":"2025-10-23T23:59:59Z","timezone_str":null,"max_results":50,"query":null,"calendar_id":"primary","next_page_token":null}' + error: + code: 500 + message: An unknown error occurred while executing the tool. 
+ type: http_error + id: mcp_0558010cf1416a490068faa102400481a09fa35e84bc26c170 + name: search_events + output: null + server_label: google_calendar + status: failed + type: mcp_call + - encrypted_content: gAAAAABo-qEECuiSxfvrR92v1hkqyCTCWyfmpHSaW-vVouk5mOTIFDvaBZdVTFH8-dJfpwEG3MCejRKh9V-I8mrYAjhudVr1ayHo8UYOOU1cfVc6w3wsrkL8hXljjE-amiJhBSjvRc2nwwGtgYpDxOfWTqJkaUvFnMD6MrS4CwMrCBbDOLYZgM1cQbidtrrtpP7D5u42tR6coC_PCOqwPzDN4f0RggrxVxh0038p81VUmlkUeA2jWzRyFpeDGRjXFk84Og73rXAp7EWQv7TmzgVXBjCVwwzJNU8HCZ_gkwh5dvL94QxBx32lEmfOOKcqA3hN3FLwDqXlZ8f7jEqYInnpILQgX5XMdM9OrCyXmDCr_eIy00cjvxnTcXhCnZBOaKCKmTP74yUpGNdLbQcr4BalTiviNYEeCAhJyRo4KnhUZbBoT7MB5NULf-kqhRo1gEGKjWiLdV47PhR7Z8i4BK7zBceganMKpLtzIMW5a6JAujC4Z9FYxcpJZI_CD9NHsPr4SjKgIwv89d6BYo89-xfflF6ZUZBkuDUnL2-Nc9CKgGuKlcDunvYLr38pzA278OFYzh9T42u4SbS8KkSXKjGU3H8LfpMnBEZigriixLt5vj7qnWmZvCFarzxT4U4qqR1ITp5rkO6G9kYvBEfS7wu768mteDBgAajUaeOMQEfjJRErC4wfzbB89YCsXPJz0JE90QZ5LeiP5ZlVezTTaddG9JmiGsBCPckqUb1LWdpvekCfPkePF_uDMVWyJpQ4ZBzQsZx8sHf5spygsiQjlzTiriqwhoTcPuXoONoCr9HeFX1Qy8SGOm87siRPAD7FHJdDxbJwq8tOlMpx8MH1dqEY07lwoxZB0GQ9XbB7QJXfQR_27nkpqBYFkrbqChNJLO2x8gNFClbB0mgYQE1CRy64y6yOrG3CtS53RK5VGrF1GnqwuWdZ452VgShT5nAmPFRlRk1S9px4eMUTAozT0QAYrlHQC7b6I6K3m_Qe3kXGpnn_87i2eGG8mHmXG2FvFChkgf2OU7-LRy_Wl_u-ataICeoBwfngBFMppvUW6tJP009HK7mUE8P1KJntN3ExKLIBhmKhV6ziBpIi1bSTmd8leYqfSaf648c7-sVuDRx7DzxTp19l3fwVFa67GdiagZFs7xaU1HxMnMc3uy5VKWAH_qcv-Mga3VCTtTPpMTjvB95nsLeOFjS2FtpPvaP0N6o5kkkzW7cteWpOHhSX0z7AQA7CqgOCQLfLUc7ltVxnOH4WdHoeZFah_q_Ue6caf0kNo4YsTfbRDdzsW70o8P5Agr-Pgttg19vTDA_eBFur9GDKIRT0vYMWPpykwJBDTgJKOFW6uyNkqNWk_RAAvleE9pAyOoSmgomyrMcnnpdeYHNxeNxvTWFC3mcKSjJIB316wypPvaGTJyaK_pxJScD7CtLrIPkgwPpOsJnDySF6wGe-fGsUMt3zxJrc-S6fp24mYVfTRZbjUsP0fJgLmCohJiAtEg_xvlQ8sPyuLoLdOdossTQ7ufl0CwVn4f_ol4q__gpTvYVaoGsWl3QmHul5zj7OUAn7of6iBfCSlXbrauJvMyNYt4x_dLM8SXTRNPe-ZMDmER9DOw0KJXcUrpl6uw4TphKmUOK6KrxqshujXdN9VDgOwD7eKqIHpvC_6a2R6sS6ZHcebmh2o3bic-Hctomrbv03OQ== + id: rs_0558010cf1416a490068faa102d89481a0b74cca04bcb8f127 + summary: [] + type: reasoning + - content: + - annotations: [] + logprobs: [] + text: I’m sorry, I’m having trouble accessing your calendar right now. Would you like me to try again? 
+ type: output_text + id: msg_0558010cf1416a490068faa103e6c481a0930eda4f04bb3f2a + role: assistant + status: completed + type: message + parallel_tool_calls: true + previous_response_id: null + prompt_cache_key: null + reasoning: + effort: medium + summary: null + safety_identifier: null + service_tier: default + status: completed + store: true + temperature: 1.0 + text: + format: + type: text + verbosity: medium + tool_choice: auto + tools: + - allowed_tools: + - search_events + authorization: + connector_id: connector_googlecalendar + headers: null + require_approval: never + server_description: Google Calendar + server_label: google_calendar + server_url: null + type: mcp + top_logprobs: 0 + top_p: 1.0 + truncation: disabled + usage: + input_tokens: 1065 + input_tokens_details: + cached_tokens: 0 + output_tokens: 760 + output_tokens_details: + reasoning_tokens: 576 + total_tokens: 1825 + user: null + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 5fe850bf45..aa78ff0904 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -41,7 +41,7 @@ ToolReturnPart, UserPromptPart, ) -from pydantic_ai.builtin_tools import CodeExecutionTool, MemoryTool, WebSearchTool +from pydantic_ai.builtin_tools import CodeExecutionTool, MCPServerTool, MemoryTool, WebSearchTool from pydantic_ai.exceptions import UserError from pydantic_ai.messages import ( BuiltinToolCallEvent, # pyright: ignore[reportDeprecated] @@ -3064,6 +3064,520 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None ) +async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_key: str): + m = AnthropicModel('claude-sonnet-4-0', provider=AnthropicProvider(api_key=anthropic_api_key)) + settings = AnthropicModelSettings(anthropic_thinking={'type': 'enabled', 'budget_tokens': 3000}) + agent = Agent( + m, + builtin_tools=[ + MCPServerTool( + id='deepwiki', + url='https://mcp.deepwiki.com/mcp', + ) + ], + model_settings=settings, + ) + + result = await agent.run('Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short') + messages = result.all_messages() + assert messages == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + ThinkingPart( + content='The user is asking about the pydantic/pydantic-ai repository and wants me to keep the answer short. I should use the deepwiki tools to get information about this repository. 
Let me start by asking a general question about what this repository is about.', + signature='EqUDCkYICBgCKkCTiLjx5Rzw9zXo4pFDhFAc9Ci1R+d2fpkiqw7IPt1PgxBankr7bhRfh2iQOFEUy7sYVtsBxvnHW8zfBRxH1j6lEgySvdOyObrcFdJX3qkaDMAMCdLHIevZ/mSx/SIwi917U34N5jLQH1yMoCx/k72klLG5v42vcwUTG4ngKDI69Ddaf0eeDpgg3tL5FHfvKowCnslWg3Pd3ITe+TLlzu+OVZhRKU9SEwDJbjV7ZF954Ls6XExAfjdXhrhvXDB+hz6fZFPGFEfXV7jwElFT5HcGPWy84xvlwzbklZ2zH3XViik0B5dMErMAKs6IVwqXo3s+0p9xtX5gCBuvLkalET2upNsmdKGJv7WQWoaLch5N07uvSgWkO8AkGuVtBgqZH+uRGlPfYlnAgifNHu00GSAVK3beeyZfpnSQ6LQKcH+wVmrOi/3UvzA5f1LvsXG32gQKUCxztATnlBaI+7GMs1IAloaRHBndyRoe8Lwv79zZe9u9gnF9WCgK3yQsAR5hGZXlBKiIWfnRrXQ7QmA2hVO+mhEOCnz7OQkMIEUlfxgB', + provider_name='anthropic', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args={ + 'action': 'call_tool', + 'server_id': 'deepwiki', + 'tool_name': 'ask_question', + 'tool_args': { + 'repoName': 'pydantic/pydantic-ai', + 'question': 'What is pydantic-ai and what does this repository do?', + }, + }, + tool_call_id='mcptoolu_01SAss3KEwASziHZoMR6HcZU', + provider_name='anthropic', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'content': [ + { + 'citations': None, + 'text': IsStr(), + 'type': 'text', + } + ], + 'is_error': False, + }, + tool_call_id='mcptoolu_01SAss3KEwASziHZoMR6HcZU', + timestamp=IsDatetime(), + provider_name='anthropic', + ), + TextPart( + content="""\ +**Pydantic AI** is a Python agent framework for building production-grade applications with Generative AI. It provides: + +- **Type-safe agents** with compile-time validation using `Agent[Deps, Output]` +- **Model-agnostic design** supporting 15+ LLM providers (OpenAI, Anthropic, Google, etc.) +- **Structured outputs** with automatic Pydantic validation and self-correction +- **Built-in observability** via OpenTelemetry and Logfire integration +- **Production tooling** including evaluation framework, durable execution, and tool system + +The repo is organized as a monorepo with core packages like `pydantic-ai-slim` (core framework), `pydantic-graph` (execution engine), and `pydantic-evals` (evaluation tools). It emphasizes developer ergonomics and type safety, similar to Pydantic and FastAPI.\ +""" + ), + ], + usage=RequestUsage( + input_tokens=2674, + output_tokens=373, + details={ + 'cache_creation_input_tokens': 0, + 'cache_read_input_tokens': 0, + 'input_tokens': 2674, + 'output_tokens': 373, + }, + ), + model_name='claude-sonnet-4-20250514', + timestamp=IsDatetime(), + provider_name='anthropic', + provider_details={'finish_reason': 'end_turn'}, + provider_response_id='msg_01MYDjkvBDRaKsY6PDwQz3n6', + finish_reason='stop', + ), + ] + ) + + result = await agent.run('How about the pydantic repo in the same org?', message_history=messages) + messages = result.new_messages() + assert messages == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='How about the pydantic repo in the same org?', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + ThinkingPart( + content='The user is asking about the pydantic repo in the same org, so that would be pydantic/pydantic. 
I should ask about what this repository does and provide a short answer.', + signature='EtECCkYICBgCKkAkKy+K3Z/q4dGwZGr1MdsH8HLaULElUSaa/Y8A1L/Jp7y1AfJd1zrTL7Zfa2KoPr0HqO/AI/cJJreheuwcn/dWEgw0bPLie900a4h9wS0aDACnsdbr+adzpUyExiIwyuNjV82BVkK/kU+sMyrfbhgb6ob/DUgudJPaK5zR6cINAAGQnIy3iOXTwu3OUfPAKrgBzF9HD5HjiPSJdsxlkI0RA5Yjiol05/hR3fUB6WWrs0aouxIzlriJ6NzmzvqctkFJdRgAL9Mh06iK1A61PLyBWRdo1f5TBziFP1c6z7iQQzH9DdcaHvG8yLoaadbyTxMvTn2PtfEcSPjuZcLgv7QcF+HZXbDVjsHJW78OK2ta0M6/xuU1p4yG3qgoss3b0G6fAyvUVgVbb1wknkE/9W9gd2k/ZSh4P7F6AcvLTXQScTyMfWRtAWQqABgB', + provider_name='anthropic', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args={ + 'action': 'call_tool', + 'server_id': 'deepwiki', + 'tool_name': 'ask_question', + 'tool_args': { + 'repoName': 'pydantic/pydantic', + 'question': 'What is Pydantic and what does this repository do?', + }, + }, + tool_call_id='mcptoolu_01A9RvAqDeoUnaMgQc6Nn75y', + provider_name='anthropic', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'content': [ + { + 'citations': None, + 'text': """\ +Pydantic is a Python library for data validation, parsing, and serialization using type hints . This repository, `pydantic/pydantic`, contains the source code for the Pydantic library itself, including its core validation logic, documentation, and continuous integration/continuous deployment (CI/CD) pipelines . + +## What is Pydantic + +Pydantic is designed to ensure that data conforms to specified types and constraints at runtime . It leverages Python type hints to define data schemas and provides mechanisms for data conversion and validation . The library's core validation logic is implemented in Rust within a separate package called `pydantic-core`, which contributes to its performance . + +Pydantic offers several user-facing APIs for validation: +* `BaseModel`: Used for defining class-based models with fields, suitable for domain models, API schemas, and configuration . +* `TypeAdapter`: Provides a flexible way to validate and serialize arbitrary Python types, including primitive types and dataclasses . +* `@dataclass`: Enhances Python's built-in dataclasses with Pydantic's validation capabilities . +* `@validate_call`: Used for validating function arguments and return values . + +## What this Repository Does + +The `pydantic/pydantic` repository serves as the development hub for the Pydantic library. Its primary functions include: + +### Core Library Development +The repository contains the Python source code for the Pydantic library, including modules for `BaseModel` , `Field` definitions , configuration management , and type adapters . It also includes internal modules responsible for model construction and schema generation . + +### Documentation +The repository hosts the documentation for Pydantic, which is built using MkDocs . The documentation covers installation instructions , core concepts like models , fields, and JSON Schema generation . It also includes information on contributing to the project . + +### Continuous Integration and Deployment (CI/CD) +The repository utilizes GitHub Actions for its CI/CD pipeline . This pipeline includes: +* **Linting**: Checks code quality and style . +* **Testing**: Runs a comprehensive test suite across multiple operating systems and Python versions . This includes memory profiling tests, Mypy plugin tests, and type-checking integration tests . +* **Coverage**: Aggregates test coverage data and posts comments to pull requests . 
+* **Release Process**: Automates publishing new versions to PyPI and sending release announcements . +* **Third-Party Integration Testing**: Tests Pydantic's compatibility with other popular libraries like FastAPI, SQLModel, and Beanie . +* **Dependency Management**: Uses `uv` for managing dependencies and includes workflows to check compatibility with various dependency versions . +* **Performance Benchmarking**: Utilizes CodSpeed to track and analyze performance . + +## Versioning and Compatibility +Pydantic maintains strict version compatibility between the pure Python package (`pydantic`) and its Rust-based validation core (`pydantic-core`) . A `SystemError` is raised if there's a mismatch in `pydantic-core` versions, ensuring a stable environment . The `version_info()` function provides detailed version information for Pydantic and its dependencies . + +Notes: +The `CITATION.cff` file also provides a concise description of Pydantic as "the most widely used data validation library for Python" . The `README.md` and `docs/index.md` files reiterate this, emphasizing its speed and extensibility . + +Wiki pages you might want to explore: +- [Overview (pydantic/pydantic)](/wiki/pydantic/pydantic#1) +- [Development and Deployment (pydantic/pydantic)](/wiki/pydantic/pydantic#7) + +View this search on DeepWiki: https://deepwiki.com/search/what-is-pydantic-and-what-does_dab96efa-752a-4688-a630-3f4658084a88 +""", + 'type': 'text', + } + ], + 'is_error': False, + }, + tool_call_id='mcptoolu_01A9RvAqDeoUnaMgQc6Nn75y', + timestamp=IsDatetime(), + provider_name='anthropic', + ), + TextPart( + content="""\ +**Pydantic** is Python's most widely used data validation library for parsing, validation, and serialization using type hints. The repository contains: + +**Core Features:** +- **Data validation** with automatic type conversion and constraint checking +- **Multiple APIs**: `BaseModel` for class-based models, `TypeAdapter` for arbitrary types, `@dataclass` decorator, and `@validate_call` for functions +- **High performance** via Rust-based validation core (`pydantic-core`) +- **JSON Schema generation** and comprehensive serialization support + +**Repository Contents:** +- Python source code for the main Pydantic library +- Comprehensive documentation built with MkDocs +- Extensive CI/CD pipeline with testing across multiple Python versions and OS +- Integration testing with popular libraries (FastAPI, SQLModel, etc.) 
+- Performance benchmarking and dependency compatibility checks + +Pydantic ensures runtime data integrity through type hints and is foundational to many Python frameworks, especially in web APIs and data processing applications.\ +""" + ), + ], + usage=RequestUsage( + input_tokens=5262, + output_tokens=369, + details={ + 'cache_creation_input_tokens': 0, + 'cache_read_input_tokens': 0, + 'input_tokens': 5262, + 'output_tokens': 369, + }, + ), + model_name='claude-sonnet-4-20250514', + timestamp=IsDatetime(), + provider_name='anthropic', + provider_details={'finish_reason': 'end_turn'}, + provider_response_id='msg_01DSGib8F7nNoYprfYSGp1sd', + finish_reason='stop', + ), + ] + ) + + +async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropic_api_key: str): + m = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(api_key=anthropic_api_key)) + settings = AnthropicModelSettings(anthropic_thinking={'type': 'enabled', 'budget_tokens': 3000}) + agent = Agent( + m, + builtin_tools=[ + MCPServerTool( + id='deepwiki', + url='https://mcp.deepwiki.com/mcp', + allowed_tools=['ask_question'], + ) + ], + model_settings=settings, + ) + + event_parts: list[Any] = [] + async with agent.iter( + user_prompt='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short' + ) as agent_run: + async for node in agent_run: + if Agent.is_model_request_node(node) or Agent.is_call_tools_node(node): + async with node.stream(agent_run.ctx) as request_stream: + async for event in request_stream: + if ( + isinstance(event, PartStartEvent) + and isinstance(event.part, BuiltinToolCallPart | BuiltinToolReturnPart) + ) or (isinstance(event, PartDeltaEvent) and isinstance(event.delta, ToolCallPartDelta)): + event_parts.append(event) + + assert agent_run.result is not None + messages = agent_run.result.all_messages() + assert messages == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + ThinkingPart( + content='The user is asking about the pydantic/pydantic-ai repository. They want a short answer about the repo. I should use the deepwiki_ask_question function to get information about this repository.', + signature='EuoCCkYICBgCKkDPqznnPHupi9rVXvaQQqrMprXof9wtQsCqw7Yw687UIk/FvF65omU22QO+CmIcYqTwhBfifPEp9A3/lM9C8cIcEgzGsjorcyNe2H0ZFf8aDCA4iLG6qgUL6fLhzCIwVWcg65CrvSFusXtMH18p+XiF+BUxT+rvnCFsnLbFsxtjGyKh1j4UW6V0Tk0O7+3sKtEBEzvxztXkMkeXkXRsQFJ00jTNhkUHu74sqnh6QxgV8wK2vlJRnBnes/oh7QdED0h/pZaUbxplYJiPFisWx/zTJQvOv29I46sM2CdY5ggGO1KWrEF/pognyod+jdCdb481XUET9T7nl/VMz/Og2QkyGf+5MvSecKQhujlS0VFhCgaYv68sl0Fv3hj2AkeE4vcYu3YdDaNDLXerbIaLCMkkn08NID/wKZTwtLSL+N6+kOi+4peGqXDNps8oa3mqIn7NAWFlwEUrFZd5kjtDkQ5dw/IYAQ==', + provider_name='anthropic', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args='{"action":"call_tool","server_id":"deepwiki","tool_name":"ask_question","tool_args":{"repoName": "pydantic/pydantic-ai", "question": "What is this repository about? 
What are its main features and purpose?"}}', + tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1', + provider_name='anthropic', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'content': [ + { + 'citations': None, + 'text': IsStr(), + 'type': 'text', + } + ], + 'is_error': False, + }, + tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1', + timestamp=IsDatetime(), + provider_name='anthropic', + ), + TextPart( + content="""\ +**Pydantic-AI** is a framework for building Generative AI applications with type safety. It provides: + +- **Unified LLM interface** - Works with OpenAI, Anthropic, Google, Groq, Cohere, Mistral, AWS Bedrock, and more +- **Type-safe agents** - Uses Pydantic for validation and type checking throughout +- **Tool integration** - Easily add custom functions/tools agents can call +- **Graph-based execution** - Manages agent workflows as finite state machines +- **Multiple output formats** - Text, structured data, and multimodal content +- **Durable execution** - Integration with systems like DBOS and Temporal for fault tolerance +- **Streaming support** - Stream responses in real-time + +It's designed to simplify building robust, production-ready AI agents while abstracting away provider-specific complexities.\ +""" + ), + ], + usage=RequestUsage( + input_tokens=3042, + output_tokens=354, + details={ + 'cache_creation_input_tokens': 0, + 'cache_read_input_tokens': 0, + 'input_tokens': 3042, + 'output_tokens': 354, + }, + ), + model_name='claude-sonnet-4-5-20250929', + timestamp=IsDatetime(), + provider_name='anthropic', + provider_details={'finish_reason': 'end_turn'}, + provider_response_id='msg_01Xf6SmUVY1mDrSwFc5RsY3n', + finish_reason='stop', + ), + ] + ) + + assert event_parts == snapshot( + [ + PartStartEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='mcp_server', + tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1', + provider_name='anthropic', + ), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta( + args_delta='{"action":"call_tool","server_id":"deepwiki","tool_name":"ask_question","tool_args":', + tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1', + ), + ), + PartDeltaEvent( + index=1, delta=ToolCallPartDelta(args_delta='', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1') + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='{"repoName"', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, delta=ToolCallPartDelta(args_delta=': "', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1') + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='pydantic', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='/pydantic-ai', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, delta=ToolCallPartDelta(args_delta='"', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1') + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta=', "question', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='": "What', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, delta=ToolCallPartDelta(args_delta=' is ', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1') + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='this repo', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='sitory about', 
tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, delta=ToolCallPartDelta(args_delta='? Wha', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1') + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='t are i', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='ts main feat', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='ure', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='s and purpo', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='se?"}', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartDeltaEvent( + index=1, + delta=ToolCallPartDelta(args_delta='}', tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1'), + ), + PartStartEvent( + index=2, + part=BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'content': [ + { + 'citations': None, + 'text': """\ +This repository, `pydantic/pydantic-ai`, is a GenAI Agent Framework that leverages Pydantic for building Generative AI applications. Its main purpose is to provide a unified and type-safe way to interact with various large language models (LLMs) from different providers, manage agent execution flows, and integrate with external tools and services. \n\ + +## Main Features and Purpose + +The `pydantic-ai` repository offers several core features: + +### 1. Agent System +The `Agent` class serves as the main orchestrator for managing interactions with LLMs and executing tasks. Agents can be configured with generic types for dependency injection (`Agent[AgentDepsT, OutputDataT]`) and output validation, ensuring type safety throughout the application. \n\ + +Agents support various execution methods: +* `agent.run()`: An asynchronous function that returns a completed `RunResult`. \n\ +* `agent.run_sync()`: A synchronous function that internally calls `run()` to return a completed `RunResult`. \n\ +* `agent.run_stream()`: An asynchronous context manager for streaming text and structured output. \n\ +* `agent.run_stream_events()`: Returns an asynchronous iterable of `AgentStreamEvent`s and a final `AgentRunResultEvent`. \n\ +* `agent.iter()`: A context manager that provides an asynchronous iterable over the nodes of the agent's underlying `Graph`, allowing for deeper control and insight into the execution flow. \n\ + +### 2. Model Integration +The framework provides a unified interface for integrating with various LLM providers, including OpenAI, Anthropic, Google, Groq, Cohere, Mistral, Bedrock, and HuggingFace. Each model integration follows a consistent settings pattern with provider-specific prefixes (e.g., `google_*`, `anthropic_*`). \n\ + +Examples of supported models and their capabilities include: +* `GoogleModel`: Integrates with Google's Gemini API, supporting both Gemini API (`google-gla`) and Vertex AI (`google-vertex`) providers. It supports token counting, streaming, built-in tools like `WebSearchTool`, `UrlContextTool`, `CodeExecutionTool`, and native JSON schema output. \n\ +* `AnthropicModel`: Uses Anthropic's beta API for advanced features like "Thinking Blocks" and built-in tools. \n\ +* `GroqModel`: Offers high-speed inference and specialized reasoning support with configurable reasoning formats. 
\n\ +* `MistralModel`: Supports customizable JSON schema prompting and thinking support. \n\ +* `BedrockConverseModel`: Utilizes AWS Bedrock's Converse API for unified access to various foundation models like Claude, Titan, Llama, and Mistral. \n\ +* `CohereModel`: Integrates with Cohere's v2 API for chat completions, including thinking support and tool calling. \n\ + +The framework also supports multimodal inputs such as `AudioUrl`, `DocumentUrl`, `ImageUrl`, and `VideoUrl`, allowing agents to process and respond to diverse content types. \n\ + +### 3. Graph-based Execution +Pydantic AI uses `pydantic-graph` to manage the execution flow of agents, representing it as a finite state machine. The execution typically flows through `UserPromptNode` → `ModelRequestNode` → `CallToolsNode`. This allows for detailed tracking of message history and usage. \n\ + +### 4. Tool System +Function tools enable models to perform actions and retrieve additional information. Tools can be registered using decorators like `@agent.tool` (for tools needing `RunContext` access) or `@agent.tool_plain` (for tools without `RunContext` access). The framework also supports toolsets for managing collections of tools. \n\ + +Tools can return various types of output, including anything Pydantic can serialize to JSON, as well as multimodal content like `AudioUrl`, `VideoUrl`, `ImageUrl`, or `DocumentUrl`. The `ToolReturn` object allows for separating the `return_value` (for the model), `content` (for additional context), and `metadata` (for application-specific use). \n\ + +Built-in tools like `UrlContextTool` allow agents to pull web content into their context. \n\ + +### 5. Output Handling +The framework supports various output types: +* `TextOutput`: Plain text responses. \n\ +* `ToolOutput`: Structured data via tool calls. \n\ +* `NativeOutput`: Provider-specific structured output. \n\ +* `PromptedOutput`: Prompt-based structured extraction. \n\ + +### 6. Durable Execution +Pydantic AI integrates with durable execution systems like DBOS and Temporal. This allows agents to maintain state and resume execution after failures or restarts, making them suitable for long-running or fault-tolerant applications. \n\ + +### 7. Multi-Agent Patterns and Integrations +The repository supports multi-agent applications and various integrations, including: +* Pydantic Evals: For evaluating agent performance. \n\ +* Pydantic Graph: The underlying graph execution engine. \n\ +* Logfire: For debugging and monitoring. \n\ +* Agent-User Interaction (AG-UI) and Agent2Agent (A2A): For facilitating interactions between agents and users, and between agents themselves. \n\ +* Clai: A CLI tool. \n\ + +## Purpose + +The overarching purpose of `pydantic-ai` is to simplify the development of robust and reliable Generative AI applications by providing a structured, type-safe, and extensible framework. It aims to abstract away the complexities of interacting with different LLM providers and managing agent workflows, allowing developers to focus on application logic. \n\ + +Notes: +The `CLAUDE.md` file provides guidance for Claude Code when working with the repository, outlining development commands and project architecture. The `mkdocs.yml` file defines the structure and content of the project's documentation, further detailing the features and organization of the repository. 
\n\ + +Wiki pages you might want to explore: +- [Google, Anthropic and Other Providers (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#3.3) + +View this search on DeepWiki: https://deepwiki.com/search/what-is-this-repository-about_5104a64d-2f5e-4461-80d8-eb0892242441 +""", + 'type': 'text', + } + ], + 'is_error': False, + }, + tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1', + timestamp=IsDatetime(), + provider_name='anthropic', + ), + ), + ] + ) + + async def test_anthropic_code_execution_tool(allow_model_requests: None, anthropic_api_key: str): m = AnthropicModel('claude-sonnet-4-0', provider=AnthropicProvider(api_key=anthropic_api_key)) settings = AnthropicModelSettings(anthropic_thinking={'type': 'enabled', 'budget_tokens': 3000}) diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index 4d6f9f4f2d..4cbaffc87b 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -35,7 +35,7 @@ capture_run_messages, ) from pydantic_ai.agent import Agent -from pydantic_ai.builtin_tools import CodeExecutionTool, WebSearchTool +from pydantic_ai.builtin_tools import CodeExecutionTool, MCPServerTool, WebSearchTool from pydantic_ai.exceptions import ModelHTTPError, ModelRetry from pydantic_ai.messages import ( BuiltinToolCallEvent, # pyright: ignore[reportDeprecated] @@ -6320,3 +6320,803 @@ class CityLocation(BaseModel): ), ] ) + + +async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None, openai_api_key: str): + m = OpenAIResponsesModel( + 'o4-mini', + provider=OpenAIProvider(api_key=openai_api_key), + ) + agent = Agent( + m, + instructions='You are a helpful assistant.', + builtin_tools=[ + MCPServerTool( + id='deepwiki', + url='https://mcp.deepwiki.com/mcp', + description='DeepWiki MCP server', + allowed_tools=['ask_question'], + headers={'custom-header-key': 'custom-header-value'}, + ), + ], + ) + + result = await agent.run('Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short') + assert result.all_messages() == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short', + timestamp=IsDatetime(), + ) + ], + instructions='You are a helpful assistant.', + ), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='mcp_server', + args={'action': 'list_tools', 'server_id': 'deepwiki'}, + tool_call_id='mcpl_0083938b3a28070e0068fabd81d51081a09d4b183ced693273', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'tools': [ + { + 'input_schema': { + 'type': 'object', + 'properties': { + 'repoName': { + 'type': 'string', + 'description': 'GitHub repository: owner/repo (e.g. 
"facebook/react")', + }, + 'question': { + 'type': 'string', + 'description': 'The question to ask about the repository', + }, + }, + 'required': ['repoName', 'question'], + 'additionalProperties': False, + '$schema': 'http://json-schema.org/draft-07/schema#', + }, + 'name': 'ask_question', + 'annotations': {'read_only': False}, + 'description': 'Ask any question about a GitHub repository', + } + ], + 'error': None, + }, + tool_call_id='mcpl_0083938b3a28070e0068fabd81d51081a09d4b183ced693273', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_0083938b3a28070e0068fabd84727c81a0a52c171d2568a947', + signature='gAAAAABo-r2bs6ChS2NtAXH6S8ZWRHzygQvAZrQGsb5ziJKg6dINF9TQnq4llBquiZh-3Ngx2Ha4S-2_TLSbgcsglradULI8c8N2CnilghcqlLE90MXgHWzGfMDbmnRVpTW9iJsOnBn4ferQtNLIsXzfGWq4Ov0Bbvlw_fCm9pQsqOavcJ5Kop2lJ9Xqb__boYMcBCPq3FcNlfC3aia2wZkacS4qKZGqytqQP13EX3q6LwFVnAMIFuwn5XLrh4lFf-S5u8UIw3C6wvVIXEUatY6-awgHHJKXxWUxqRQPJegatMb8KE-QtuKQUfdvEE0ykdHtWqT7nnC3qTY67UaSCCvJ9SdXj-t806GVei9McSUe8riU3viHnfY0R0u9GIXsVnfVthIDRnX7KzpF5ot_CpCrgbCmD9Rj2AAos5pCdSzpc08G5auUuuMZfoiWANADTHHhO2OvflSEpmO8pb-QAYfMoK9exYVQ8Oig-Nj35unupcYy7A2bDCViXzqy32aw9QHmH7rErI4v72beWQxRVdX15Z7VS2c6L1dD7cU18K35CWqlSz9hEX5AcGqEEtIDVu1TdF3m1m2u4ooc4TjYpRecjYoG8Ib-vVKoX5C65a7G1cTbCo8dO0DYKGgM8jM7ZDubxbCcZ22Sxk58f8cer7WxHyp7WRo5-6zvMwMCk8uEY44RJmg-m0Oxl_6qxdr4Md80xZah_6tCCB62agQmYwCrR75_r93xOckQAK0R_37khvQD5gWVlE5Rg-01eUTboiPGqYmIsqWvOkziMGnxgKVw_yUf8swHU1ciWr7O1EdVPHLG7YXlVQTHTE_CX3uOsE2FoZnpS_MgpxGfjb76majV50h7mJ6ySVPF_3NF3RQXx64W08SW4eVFD8JJf0yChqXDmlwu2CDZN1n99xdaE9QbMODNEOmfTQOPhQ9g-4LhstNTKCCxWDh0qiv_dq2qAd0I9Gupoit33xGpb66mndc0nuuNFe8-16iC_KzQtHBNzgasgYK-r83KFVmiYK3Jxvz_2dfdwe0M1q7NLBvbnWc6k9LIf8iDUF6Q1J-cfC7SsncCbROtzIPlKpQwxhP-M09Xy3RVxlH9dcvuk3_qqEAartUQC8ZbuLRbhiq66eE1RvQzdNd2tsoBQ85cdNs57Penio7w9zILUf1JP5O8-zCe5GPC3W3EXTIEvHR-kiuxJvhcsySijpldGmuygRx05ARNOIT7VDCZvF23RfmnRduY1X1FAqb_i_aMStK7iyHr_2ohwOWLuklpyuoG0Y1ulvq1A9-hyCZ0mpvTEF6om2tAZ9_7h8W9ksiOkey0yA-6ze17MCjfnK2XcbqmSMgOngW1PrD81oKoheMnIeJdcWgF2mk8VDqmAwaDTxMxdnXkzK74rA43a4rWk3d2bUts8dAUkuYXTwJwKQw4LfXtu-mwwgJ6BkT_GiBcBJ6ulBuPsNZfpwPuxox6PS6KpzVTQ94cKNqSIIyFCD4xZsEvPALud09-gmAEDHxdnPjqLSi2U8xd0j-6XYKN0JtZ45kwEIRsOrFu-SYLz1OcYFKI5A5P-vYlzGx1WhEnoeUlyooJBhNj6ZBfj9f63SByxm7sgh260vf1t-4OGzVTIUKFluxkI4ubigLZ-g4q4dSwiEWXn50JFPrtuPs5VxsIIz_lXbh1SrKeQ647KdDSAQZFgEfzOOt3el5K97V1x7V7gEWCCgmqDIz3yZPpwD6qmUQKqlj_p8-OQrniamGULkXrmrgbNQVfV-Qw7Hg6ELw4aHF_IZME9Qnyn7peFhH6ai_YapuNF7FK-MBtPYoMaqBf05U2-uJAVUas3VuT_-pTyHvhtFmB7vc0-qgf_CtVNIXSPq2_vXdQdEwwCVPPwW6xWm-invrzhyQR_mf3OQqZT6_zOHIMPBJUaXcQKT0KTdoBZUDamAR-ECZl8r6wdLCn0HjAEwj3ifUCNMzQ7CZHUQG46rj61YyasNWO__4Ef4kTcApKgljosuABqP4HAdmkP5eEnX-6nutrL50iv-Mms_R-T7SKtmEEf9wihTu4Meb441cU9DI4WwSyiBSnsYdGy9FJKmHwP7HD0FmpmWkOrtROkQVMlMVKQFlKK8OBtxafHYsZkWDawbA1eetzMBzQ3PP8PSvva6SJWjbgURHVm5RjXV8Hk6toIBEDx9r9vAIczSp49eDCkQbzPkGAVilO3KLQpNx2itBbZzgE36uV0neZZsVs7aqafI4qCTQOLzYA8YFDKz92yhgdIzl5VPFLFNHqRS4duPRQImQ7vb6yKSxjDThiyQQUTPBX_EXUAAR7JHwJI1i8la3V', + provider_name='openai', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args={ + 'action': 'call_tool', + 'server_id': 'deepwiki', + 'tool_name': 'ask_question', + 'tool_args': { + 'repoName': 'pydantic/pydantic-ai', + 'question': 'Provide a brief summary of the repository, including purpose, main features, and status.', + }, + }, + tool_call_id='mcp_0083938b3a28070e0068fabd88db5c81a08e56f163bbc6088b', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'output': """\ +Pydantic AI is a Python agent framework 
designed to build production-grade applications using Generative AI, emphasizing an ergonomic developer experience and type-safety . It provides type-safe agents, a model-agnostic design supporting over 15 LLM providers, structured outputs with Pydantic validation, comprehensive observability, and production-ready tooling . The project is structured as a UV workspace monorepo, including core framework components, an evaluation system, a graph execution engine, examples, and a CLI tool . + +## Purpose + +The primary purpose of Pydantic AI is to simplify the development of reliable AI applications by offering a robust framework that integrates type-safety and an intuitive developer experience . It aims to provide a unified approach to interacting with various LLM providers and managing complex agent workflows . + +## Main Features + +### Type-Safe Agents +Pydantic AI agents are generic `Agent[Deps, Output]` for compile-time validation, utilizing `RunContext[Deps]` for dependency injection and Pydantic `output_type` for output validation . This ensures that the inputs and outputs of agents are strictly typed and validated . + +### Model-Agnostic Design +The framework supports over 15 LLM providers through a unified `Model` interface, allowing developers to switch between different models without significant code changes . Implementations for providers like OpenAI, Anthropic, and Google are available . + +### Structured Outputs +Pydantic AI leverages Pydantic for automatic validation and self-correction of structured outputs from LLMs . This is crucial for ensuring data integrity and reliability in AI applications . + +### Comprehensive Observability +The framework includes comprehensive observability features via OpenTelemetry and native Logfire integration . This allows for tracing agent runs, model requests, tool executions, and monitoring token usage and costs . + +### Production-Ready Tooling +Pydantic AI offers an evaluation framework, durable execution capabilities, and protocol integrations . +* **Tool System**: Tools can be registered using the `@agent.tool` decorator, with automatic JSON schema generation from function signatures and docstrings . +* **Graph Execution**: The `pydantic_graph.Graph` module provides a graph-based state machine for orchestrating agent execution, using nodes like `UserPromptNode`, `ModelRequestNode`, and `CallToolsNode` . +* **Evaluation Framework**: The `pydantic-evals` package provides tools for creating datasets, running evaluators (e.g., `ExactMatch`, `LLMEvaluator`), and generating reports . +* **Integrations**: It integrates with various protocols and environments, including Model Context Protocol (MCP) for external tool servers, AG-UI for interactive frontends, and Temporal/DBOS for durable execution . + +## Status +The project is actively maintained and considered "Production/Stable" . It supports Python versions 3.10 through 3.13 . The documentation is built using MkDocs and includes API references and examples . + +## Notes +The repository is organized as a monorepo using `uv` for package management . Key packages include `pydantic-ai-slim` (core framework), `pydantic-evals` (evaluation system), `pydantic-graph` (graph execution engine), `examples` (example applications), and `clai` (CLI tool) . 
+ +Wiki pages you might want to explore: +- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1) + +View this search on DeepWiki: https://deepwiki.com/search/provide-a-brief-summary-of-the_a5712f6e-e928-4886-bcea-b9b75761aac5 +""", + 'error': None, + }, + tool_call_id='mcp_0083938b3a28070e0068fabd88db5c81a08e56f163bbc6088b', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_0083938b3a28070e0068fabd97008081a0ad1b2362bcb153c9', + signature='gAAAAABo-r2bD-v0Y3pAlyAEK1Sb8qJJcJRKSRtYwymHwLNXY-SKCqd_Q5RbN0DLCclspuPCAasGLm1WM1Q2Y_3szaEEr_OJalXTVEfRvhCJE1iTgoz2Uyf7KttZ4W92hlYjE8cjgdo5tKtSVkNyzTs4JUHKRHoDMutL2KivjZKuK_4n-lo9paJC_jmz6RWO8wUoXo3_fGxjliOGnWyRXwEPmgAcEWNOSVgCgAEO3vXerXRPLie02HegWcLMtK6WORDHd02Kr86QSK3W30bnvU7glAFX6VhSSnR8G0ceAM-ImoomQ8obEDyedX1-pYDKPOa4pZ5iTjD24ABYOwz-0L7SNziQJLycwwsr11Fj0_Au9yJph8YkNb2nAyFeiNVCRjKul51B7dZgz-UZ9juWO2ffeI0GNtQTYzf46_Y1t0qykGW6w59xjmBHTKf5SiSe0pqWxZ6LOLoPx01rX2gLaKgNZZiERSbO0iwbA4tpxb9ur-qeFVv5tS7xy8KFYOa8SPrypvFWDoY6CjSwTS3ir0vyfpbJy-n6bcYP_pTwDZxy_1aVkciim8Tmm_9wYgI0uY5kcA9VYJuyc4cg7S7ykTUxMZz7xiLMf8FoXl1gHbVJrYriyZzh2poYTWlcCuSCiUaXhQKxcxMRrt_P7WANx0n68ENQ40HkoJ6rThvWUuwtmEYqZ0ldh3XSFtyNrqha4PQ5eg_DudlU_5CxyykuzWmi_o5MEW4_XW4b9vdXg1laqx4189_jEuV_JPGNeL3Ke4EbMbKHzsiaGePRZGgNutnlERagmU4VFTeoE5bN3oHlR_Au4PeQxdb7BuBmZRDDCnnIRd2NfSWb7bgfUozkA4S6rm_089OlRBeRVoLtA8zZZinNGtOZl7MtkLnoJVIWpF1rr7D_47eWSyyegUIIS2e5UKLJfCLkNgSlWPU9VquHEzSfqeHfzoN5ccoVwrvrHmeveTjI-wIJygdfuyti5cMgOOkAtLzjWmbs4CjmlWcbZKeidtDj5YpCSmYAGFuZze-cSbNjMv4th639dCu_jmRMze-l2Y5npbRwMqEJr7VLXghmLc1vhOsaQM3gxoF0CJJlmvtR4jxPqhE3694YRva6LS1WjR4oueM6zfpVeB2kC0hQgqaL6MiwtTRYFfuCzEHi18TwA5bqqkfgrDXedmjAzlEGSZFe2EBRlF_ZtagrVVTCagHQArnH3DkVQMEDCHCqDxA_PINR_997IxeNgGPsvazVdOOBef7sO4rvAWrC94nIlt7d4aViqbTNMW-W8rqjGFOqj1swrM0yoX5y6LY5oXPc3Mu35xeitn_paqtGPkvuH6WeGzAiNZFDoQkUdLkZ4SIH2lr4ZXmMI3nuTzCrwyshwcEu-hhVtGAEQEqVrIn8J75IzYTs1UGLBvhmcpHxCfG04MFNoVf-EPI4SgjNEgV61861TYshxCRrydVhaJmbLqYh8yzLYBHK6oIymv-BrIJ0LX222LwoGbSc0gMTMaudtthlFXrHdnswKf81ubhF7viiD3Y=', + provider_name='openai', + ), + TextPart( + content=IsStr(), + id='msg_0083938b3a28070e0068fabd989bb481a08c61416ab343ef49', + ), + ], + usage=RequestUsage(input_tokens=1207, output_tokens=535, details={'reasoning_tokens': 320}), + model_name='o4-mini-2025-04-16', + timestamp=IsDatetime(), + provider_name='openai', + provider_details={'finish_reason': 'completed'}, + provider_response_id='resp_0083938b3a28070e0068fabd81970881a0a1195f2cab45bd04', + finish_reason='stop', + ), + ] + ) + + messages = result.all_messages() + result = await agent.run('What packages does the repo contain?', message_history=messages) + assert result.new_messages() == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='What packages does the repo contain?', + timestamp=IsDatetime(), + ) + ], + instructions='You are a helpful assistant.', + ), + ModelResponse( + parts=[ + ThinkingPart( + content='', + id='rs_0083938b3a28070e0068fabd9de42881a08fbb49a65d0f9b06', + 
signature='gAAAAABo-r2izZacxe_jVh_p3URhewxBJuyLNqkJOd0owsDPt9uCE7MXn06WHhO_mp6gLDAqcF1uhMhXCqwztJ1Nbpc0cEDAxUpUCUn2bKSgG6r8Snc_FPtKGgQWDsByvW_Nigx55CyPuNeDO_MiDgYee_WeUw7ASLPfiGOx_9YNc_BFYo1ngsb8CKZcJn3AoponMheLoxkVAPgOjMgteRVaQTr13MljTDUlBIZLIOhVbtIu_dI23saXPigbgwR4RhGn5mCHG_a9ILNkXDJUmGy5TKklIEi2HuJM3ZJ3gfoGYS3OONvzmU4AgMP2UrU17YKZAYKxUBKSpyAqigd4RJSYWzxBCoYzCTmiITwdZ6Cpsw1X9Wox_TQSGt5G2Xu0UY2TQZGRNNH8knJpWs-UQxBBV4L3alMwJuIeV-uzqeKr5fKO5rL_c9as-qQIW_EGQItjvR5z80Hi-S9VXthWCmtqZFIJkgLB5JfTYuFL86valsFVLzSavUIWJAG5qOcxag2mbZMwMRRNfvR__BBtoqBoeGIqveQAbIeZbG0ymw30PH1a2v1mmSrpkK6PB3AHYRDdpkezXLkbyGYgidyV2DAAtPaFplsubWCh_74UxmOuk4BH-9cWkE15mRUBrvtnbTb793RsPzOe7nPmkMpdgqa3nqc6RcQZ_M30lFLUViAbfpEpMVrCzz2cv1RklT1JUzpuVXBTKqQ4FxVCfnvzSgQ2INQ8K50E1X5w_7TAWhrHbNg6LetCa-4KWe9ps0GH6r1x9FWvGyVxSwa7SIdPq3sGpxjOydluPECbBOnHWFUB-3rI2DcUl4rGWYbv2FEFNeCH9Zr67uUvMc4Doi8nVMoeb1lJxFCrfziGhbEXY0FepH3zIzlj-_dXqLAL1qqhfCznT_xkDMVYg-D5gMu-_p3r2SirjJbeaz5UFmP-Dihd9v7jWgD6hx_Mq1uIdzIPE8ImGiDPR7PK64svkvwYg1Czdrc_7GmrKRuzsBL0720UXe19NQqCZfYvUJAjgbEqr3tuS_RkhuEQeeVORn88xkhkrGCEgBS0LHFpe4tcnUEXKnaYYRnoYtk5xo4EyOGVKR2yhF9ht2zrMTo83YuRAPcNT38Jk4gMtVhBaJw_GOfee-IWN_F258rpmU4p8sRV-1iSuQI3Arm4JBU66QuyjoY-KJmTcE9ft3Bfm9If3yG5W0RFRJrsVb--GjHmiiXDGWiR5Q8L1of_RnSD5QDEbXXxhn4dsDejtCXUaQXE9Ty-NvkvA7G6Ru8cMvIKqP2fXS9SmiW6ePJ2Znrlyafxx6L58pT26RF42h90BVrSldf6SjxQApK3AKZW6q8AkuJnYWTtkR9-qfIDl7W94BsgOFoEd-SDQGxWzGJV9YqAu6_SQKiNDQoZZHrJkRSOPEW_b3-BAdrpwL700I92Rye4-BdhlgeK1RwhT3w1Z-z1tvGZXJtPwdpPa3iIw2TIlesMbC1ZJ22iT3CB_r0lnlZhMtIH6o50l50UGfSDuv8HZ_RNgGnYEPqP3FW-o_VD_Yu_KBqGSA0Eb5xAJjl0vpin2vFGO1P4RdgI17eZXRsCp1KvkpWjbEQTWAvJz39yr7wFQ4BrPfgxUqMP0-ZI_h1DkdPBzWs1uKqHw-4qC77sZXgxgHGEIU1tfKosTy_fK4c-WAbdqIHNTh9VdlM1EdrUJQ4rs2rsUG8o9WXwnGTFchI9Ao64LiCFTFTiFL_dvKI4ZraNNXXprfPhxsdLBaNfgj2CIfUwBMJ9xMGmHKQKLtwZdHpQNVqi8DNm1qjvs3CxbSXGKtkl5K8UhJtI1g4OnEnbq3jDO8DGIyDl0NH-0bcCDqS2yAkh8I3IobzxTg16mqU3roXLQ4pGXnWbx26A_9zb4Y1jV7rzCq24VIfNJzMUtW4fVMYzlrp3X1l32I5hF3YP-tU2paD98xobgc2Cn2RWXd3OirrdjKAE088KhXYLZZY59y4LYRLC6MDMHSX0cbEXbBvl6mKmbaFig2_7ICiSa7rR_Ij6PpQRxIW7NfS7ZMu5w7TnhLJyg5nuwMI8A5pVxfy3gYg2L60wepuX7UUV0USaHNKi8qxbp4RJj4nO-GdE8TbLJtvPw-OzrH9Qiv7iDHVMHOe1CDPLD5IeGqmVB0tuLqlyASuIe3oPxTU7QdctyxHa1z-sO8nN6kpPnzmVmS6XK8bY-h5do28dkZvefomSquXwKeiVg9VAMWVziKLPWWg5iWp2x-spLkWcQsQle2T7xizyETaF1t6YbecXtSoVFmu90_o6ns07etU3RVK1YpQLgqUIJwwF3ZwP65MaWPwqDuWCuoQErlApdhRptxId67KE3UC4j8cAaGSoG0kXnws-jzpPyAg1GU8c-Gu_K0F-h-KFbHPMiWCrrQqzVfvoA2wLaQz3NPAqpq-kbFmrXRGkzLIeIvRVxck-sKkxQIcg3amSV5Dykl-lRCXGxlWNiFG_1SFrTSfp5VKyg7l1KjJzXUXHtqAErsPtMyhxaMmlh4An5a8NIaM9W6tafJrBXpUh85DfwZ8W92OAi1WOgoJIwWXSSeSuo6ECDstjVWW3OQQh9183jliwS7Bis3eu9jgAF3q8sYILBdwjrJRa6aAna2GirNwqZMEIg60kIlvmf1U6S2PgYaPm9UDzvMxjpzwjhXhzxHJitfU1tfl0vo-ATaTV8CxmKerNzy2AjlIZnjknG3xLyonCHbGbAe33QQTclb98y_vr5nA4WKlrls413o0a0f8GL8GjINCOd1RHVMjV', + provider_name='openai', + ), + TextPart( + content="""\ +The monorepo is organized into these main packages: \n\ + +• pydantic-ai-slim\u2003– core agent framework (type-safe agents, model interface, tooling) \n\ +• pydantic-evals\u2003\u2003– evaluation system (datasets, metrics, evaluators, reports) \n\ +• pydantic-graph\u2003\u2003– graph-based execution engine (state-machine orchestration) \n\ +• clai\u2003\u2003\u2003\u2003\u2003\u2003\u2003– CLI for scaffolding and running agents \n\ +• examples\u2003\u2003\u2003\u2003– sample apps & demos showing real-world usage\ +""", + id='msg_0083938b3a28070e0068fabda04de881a089010e6710637ab3', + ), + ], + usage=RequestUsage(input_tokens=1109, output_tokens=444, details={'reasoning_tokens': 320}), + 
model_name='o4-mini-2025-04-16', + timestamp=IsDatetime(), + provider_name='openai', + provider_details={'finish_reason': 'completed'}, + provider_response_id='resp_0083938b3a28070e0068fabd9d414881a089cf24784f80e021', + finish_reason='stop', + ), + ] + ) + + +async def test_openai_responses_model_mcp_server_tool_stream(allow_model_requests: None, openai_api_key: str): + m = OpenAIResponsesModel('o4-mini', provider=OpenAIProvider(api_key=openai_api_key)) + agent = Agent( + m, + instructions='You are a helpful assistant.', + builtin_tools=[ + MCPServerTool( + id='deepwiki', + url='https://mcp.deepwiki.com/mcp', + allowed_tools=['ask_question', 'read_wiki_structure'], + ), + ], + ) + + event_parts: list[Any] = [] + + async with agent.iter( + user_prompt='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short' + ) as agent_run: + async for node in agent_run: + if Agent.is_model_request_node(node) or Agent.is_call_tools_node(node): + async with node.stream(agent_run.ctx) as request_stream: + async for event in request_stream: + if ( + isinstance(event, PartStartEvent) + and isinstance(event.part, BuiltinToolCallPart | BuiltinToolReturnPart) + ) or (isinstance(event, PartDeltaEvent) and isinstance(event.delta, ToolCallPartDelta)): + event_parts.append(event) + + assert agent_run.result is not None + messages = agent_run.result.all_messages() + assert messages == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short', + timestamp=IsDatetime(), + ) + ], + instructions='You are a helpful assistant.', + ), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='mcp_server', + args={'action': 'list_tools', 'server_id': 'deepwiki'}, + tool_call_id='mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'tools': [ + { + 'input_schema': { + 'type': 'object', + 'properties': { + 'repoName': { + 'type': 'string', + 'description': 'GitHub repository: owner/repo (e.g. "facebook/react")', + } + }, + 'required': ['repoName'], + 'additionalProperties': False, + '$schema': 'http://json-schema.org/draft-07/schema#', + }, + 'name': 'read_wiki_structure', + 'annotations': {'read_only': False}, + 'description': 'Get a list of documentation topics for a GitHub repository', + }, + { + 'input_schema': { + 'type': 'object', + 'properties': { + 'repoName': { + 'type': 'string', + 'description': 'GitHub repository: owner/repo (e.g. 
"facebook/react")', + }, + 'question': { + 'type': 'string', + 'description': 'The question to ask about the repository', + }, + }, + 'required': ['repoName', 'question'], + 'additionalProperties': False, + '$schema': 'http://json-schema.org/draft-07/schema#', + }, + 'name': 'ask_question', + 'annotations': {'read_only': False}, + 'description': 'Ask any question about a GitHub repository', + }, + ], + 'error': None, + }, + tool_call_id='mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_00b9cc7a23d047270068faa0e4cd5c819f8855c183ff0fe957', + signature='gAAAAABo-qDma-ZMjX6meVDoCLYMqgkbQoEVzx_VFnmBFRLqsq37MiF7LP1HrMpqXqtrZ0R2Knb6lUiGSKhsOjOUAn9IFNUCuJx23cPLObF2CKt86wGLb7vccbCrp8bx-I6-kUtZASjlJx7_eJnvwyr24FLZlaDyGDuqRecGA8H4tXnQSAQTT9fJqy8h8dXvxvYzNj5rgOUWgRGn1NBph164KpiEzVWHADzZ_K0l4fX-DFHgtNFssPDYqOKLs_nU0XO8xaIZOgJ8QTf0XmHYF02GA_KciV6sIlSzVricQkwmu1XfJbjpME8XmRMIzlnLRqC8SAJs2kiaYnA8ObfI-s0RbRd3ztIUrzmAsdeo13ualD3tqC1w1_H6S5F47BB47IufTTbpwe_P6f5dLGpOzcrDPbtfHXv-aAW5YEsGyusXqxk51Wp7EONtADmPmVLJffFbRgnwfvPslbxxpNGfxNkN2pIs3U1FW7g1VvmxUfrF84LJpPKvs3xOaWXGorrPBY5nUyeRckhDFt6hGdS59VICmVy8lT4dL_LNswq7dVRS74HrrkfraXDDm2EhL2rtkwhiMqZtuYFsyIK2ys0lZuhNAkhtfgIoV8IwY6O4Y7iXbODxXUr48oZyvLdgV2J2TCcyqIbWClh3-q8MXMmP5wUJdrqajJ8lMVyhQt0UtMJKyk6EWY1DayGpSEW6t8vkqmuYdhyXQOstluONd31LqnEq58Sh8aHCzrypjcLfjDRo5Om1RlxIa-y8S-6rEIXahcJCX_juSg8uYHzDNJffYdBbcLSVQ5mAVl6OM9hE8gHs7SYqw-k-MCeoYsZwt3MqSV7piAu91SMZqB0gXrRDD67bdhmcLBYKmZYKNmLce60WkLH0eZMPSls-n2yyvmwflJA---IZQZOvYXpNUuS7FgMrh3c7n9oDVp15bUgJ8jDx6Mok4pq9E-MHxboblGUpMlFCJDH3NK_7_iHetcqC6Mp2Vc5KJ0OMpDFhCfT3Bvohsee5dUYZezxAkM67qg0BUFyQykulYLHoayemGxzi1YhiX1Of_PEfijmwV2qkUJodq5-LeBVIv8Nj0WgRO-1Y_QW3AWNfQ80Iy6AVa8j9YfsvQU1vwwE9qiAhzSIEeN1Pm2ub8PaRhVIFRgyMOLPVW7cDoNN8ibcOpX-k9p_SfKA9WSzSXuorAs80CTC9OwJibfcPzFVugnnBjBENExTQRfn4l7nWq-tUQNrT4UNGx-xdNeiSeEFCNZlH50Vr5dMaz5sjQQEw_lcTrvxKAV5Zs1mtDf6Kf29LkqhuUEdlMLEJwnAdz2IHLIy41zWLQctSnzBl9HB3mkw8eHZ1LdaRBQRFH4o7Rumhb3D1HdIqDLWeE3jkA6ZBAh2KadGx1u3AIIh4g3dHUS6UREkmzyRIuImbdTsoin1DrQbuYbaqZwIqU4TTIEmA8VeohMfff0rIL5yyFy7cfgGYurgAyMhARPGAAMAoTrR8ldWwymzPkGOJ_SQlzfNGV8weHOEYUl2BgQe57EDX4n1Uk294GIbvGR7eLRL_TLBUyHQErCaOCi8TkBNlLXIobw4ScN_jqqtURmC0mjRDVZeBi6hfrVShWChpQR8A2HxxHrcuHi2hi_2akgUea3zz6_zbUYVoIRdOa9DvZuN015E8ZSL-v_1_vOzUGvt0MuWPazjiRDWgpgcISYzT8N-Xzu_EbwO1OsaOFIeUqrD8mZ6MKOuBQts68og0DWo8KQaHmCaWi4O-c8-5fbB2q3H6oiIoZtSJIoowAmFGOwyWxn_OPS9svDgEaeFYEYhXZ5wZDphxoHkjJ703opxrWoEfQw==', + provider_name='openai', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args='{"action":"call_tool","server_id":"deepwiki","tool_name":"ask_question","tool_args":{"repoName":"pydantic/pydantic-ai","question":"What is the pydantic/pydantic-ai repository about?"}}', + tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'error': None, + 'output': """\ +The `pydantic/pydantic-ai` repository is a Python agent framework designed to simplify the development of production-grade applications using Generative AI . It aims to bring the ergonomic developer experience and type-safety philosophy of Pydantic and FastAPI to AI agent development . + +## Core Purpose and Features + +The framework focuses on providing a robust and type-safe environment for building AI agents . 
Key features include: + +* **Type-safe Agents**: Agents are generic `Agent[Deps, Output]` for compile-time validation, leveraging Pydantic for output validation and dependency injection . +* **Model-agnostic Design**: It supports over 15 LLM providers through a unified `Model` interface, allowing for easy switching between different models and providers . +* **Structured Outputs**: Automatic Pydantic validation and reflection/self-correction ensure structured and reliable outputs from LLMs . +* **Comprehensive Observability**: Integration with OpenTelemetry and native Logfire provides real-time debugging, performance monitoring, and cost tracking . +* **Production-ready Tooling**: This includes an evaluation framework (`pydantic-evals`), durable execution capabilities, and various protocol integrations like MCP, A2A, and AG-UI . +* **Graph Support**: It provides a way to define graphs using type hints for complex applications . + +## Framework Architecture + +The framework is structured as a UV workspace monorepo, containing several packages . + +### Core Packages + +* `pydantic-ai-slim`: Contains the core framework components such as `Agent`, `Model`, and tools . +* `pydantic-ai`: A meta-package that includes all optional extras . + +### Supporting Packages + +* `pydantic-graph`: Provides the graph execution engine with `Graph` and `BaseNode` . +* `pydantic-evals`: An evaluation framework for datasets and evaluators . +* `examples`: Contains example applications . +* `clai`: Provides a CLI interface . + +## Agent Execution Flow + +The `Agent` class serves as the primary orchestrator . Agent execution is graph-based, utilizing a state machine from `pydantic_graph.Graph` . The execution involves three core node types: + +* `UserPromptNode`: Processes user input and creates initial `ModelRequest` . +* `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries . +* `CallToolsNode`: Executes tool functions via `RunContext[Deps]` . + +The `Agent` provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution scenarios . + +## Model Provider Support + +The framework offers a unified `Model` abstract base class for various LLM providers . This includes native support for providers like OpenAI, Anthropic, Google, Groq, Mistral, Cohere, and Bedrock . Additionally, many OpenAI-compatible providers can be used with `OpenAIChatModel` . + +## Tool System + +Tools are registered using the `@agent.tool` decorator . The system automatically generates JSON schemas from function signatures and docstrings, validates tool call arguments, and provides context injection via `RunContext[Deps]` . + +## Observability Integration + +Pydantic AI integrates with OpenTelemetry, allowing for instrumentation of agent runs, model requests, and tool executions . It has native integration with Pydantic Logfire for enhanced monitoring and visualization . + +## Evaluation Framework + +The `pydantic-evals` package provides a framework for systematically testing and evaluating AI systems . It supports defining datasets with `Case` objects and using various evaluators, including built-in and custom ones . + +## Integration Ecosystem + +Pydantic AI supports various integrations for development and production: + +* **Model Context Protocol (MCP)**: For external tool server access . +* **AG-UI Protocol**: For interactive application frontends . +* **Agent2Agent (A2A)**: For multi-agent communication and workflows . +* **Temporal**: For durable workflow execution . 
+* **DBOS**: For database-backed execution and state persistence . + +## Notes + +The `CLAUDE.md` file provides guidance for Claude Code when working with the repository, including development commands and an overview of core components and design patterns . The `mkdocs.yml` file defines the structure and content of the project's documentation, including navigation, plugins, and watch directories for various packages . The `docs/install.md` file details how to install the `pydantic-ai` package and its optional components, including a "slim" installation option for specific model dependencies . + +Wiki pages you might want to explore: +- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1) + +View this search on DeepWiki: https://deepwiki.com/search/what-is-the-pydanticpydanticai_e234e9cf-d4aa-4c67-a257-56034816dd56 +""", + }, + tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_00b9cc7a23d047270068faa0f4ff54819f9fb9ff25bebe7f5f', + signature='gAAAAABo-qD2WTMmhASwWVtFPlo7ILZP_OxHfRvHhda5gZeKL20cUyt0Np6wAHsJ6pyAsXCkLlKBVz3Vwm52JrJuUbqmw-zlXL19rbpvTPRMkiv_GdSfvmxKKNJvSm417OznBDVjsIAqmes2bMq03nRf6Pq2C0oUJnIbpbMwtWzs3jMQqUb0IwyopqXGhn3MWKctLPKZS89nyL4E9kJAx_TyWTQvME8bf8UrV8y2yrNz9odjSQQyZq5YXrlHzpOJjDTfLofVFjsEzM8J29SdLcWnqlv4djJ8xeMpP2ByXuHRnTEyNNuxpYJB7uQbYT0T_eLhwcLv2ZzDZ_hf2Msv7ZdyuPc7Yxc5YWlChB0iaHqQ_8UuMjIVurfgSIjSq2lTvJwdaA365-ZoBMpo4mG04jQDP3XM-0xEM6JTFWc4jZ1OjIXVpkjaXxdOOkYq3t3j8cqBQH69shFCEQr5tnM8jOEl3WHnkvaBg4xEMcd61hiLOKnWbQiYisbFucA8z5ZNbdohUZd-4ww0R8kSjIE5veiyT66gpIte0ItUnTyhIWy8SZYF9bnZGeS-2InDhv5UgjF2iXzgl6dmUrS-_ITgJkwu4Rdf9SBDJhji3_GUO9Za0sBKW8WohP142qY0Tbq4I6-7W1wJ3_gHJqiXVwDLcY90ODSyyC5_I3MgaALRC1wt55sHSeSsDjmNGmiH-m0snaqsI0JnAZwycnWCK17NamjQ9SxVM5tTqJgemkGFQNH1XhZPWvVj56mlj74KKbCJALQpdXD27C8LfdrlBd0v_zEmF1dh7e12I95fYeAlO51xOglBaMCgcMWSDHMGHsJBbJ04eVQSwYTl72rmkASTMaybD-aAm1m8qZnKU-f3xQradhs9l1x9eOfQDIsfWMr1aVMiZi59--VsrgYCbqBj7AGf8n6VNbQWkhO2etozwYZcdGIyiu4TaULX1Xp89Gb28M-tVkIrkQoHO_Z7wzKU1HRBViES1wRKUJ-Sa6wc8UP5orDxeOTFPUr7JL-qaj49cpKzvdlfuoIdbYwpsNvAg69sNbFI3w4jLxOT4yxS6thra1Bit6SY5wAEfrrjtzofLeg49aFqFVGIHeJ8kE3spc1rctpETkdHNyP9fEjZaM3mxR4yz0tPmEgUsd-sdw5BbOKDAVzwconmbeGBmf9KLXMEpRRH7-qSIWUscCi5qIdHXGYoQkStsNGrnhucn_hwqZCSti3Kbzfosud3zQPjW6NyuJCdeTxbDbsnrV7Lkge5j92pyxCHw9j0iuzofRW55_KToBtIvRoPr_37G_6d6TxK42mKqdbgk9GHrcXf27mXszCEzX-VfRVTxyc6JLfEy1iikdo-J2AzXPd4m3zE-zazBU3Z5ey596g8gxwXMkHakLrvwp4_-fQfcvs7sIH34xkEhz7BRdNok3Aqbu_zCt2np69jjHqfPQWZzAy1C-bmMuhAaItPYkkw-LgSu-YP6L89zNofK9Q_S3JwVsLN-fq-9OwhSjy_rQu22Gn4KD6saAu61QMXBPa6z0QJSFUZHJQ_megq1tENfB6wRVtQ0DdAvUwhUsMwx6yE9CT20bma4CloGW__aZuD9gikdQrQ1DCHOvTrfEpvHkl6-wuCImeNjsCvbRFAkx6Xgpc6fdbq4j6WyEVW_4VePNknFWYZ1cw795ka5uJMLc3hVughVlGwDbw60Q3utsjHPbu03pxPle5pdcVEYSQWa0WbFDCrF4ysK0lpmlF7', + provider_name='openai', + ), + TextPart( + content=IsStr(), + id='msg_00b9cc7a23d047270068faa0f63798819f83c5348ca838d252', + ), + ], + usage=RequestUsage(input_tokens=1401, output_tokens=480, details={'reasoning_tokens': 256}), + model_name='o4-mini-2025-04-16', + timestamp=IsDatetime(), + provider_name='openai', + provider_details={'finish_reason': 'completed'}, + provider_response_id='resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4', + finish_reason='stop', + ), + ] + ) + + assert event_parts == snapshot( + [ + PartStartEvent( + index=0, + part=BuiltinToolCallPart( + tool_name='mcp_server', + args={'action': 'list_tools', 'server_id': 'deepwiki'}, + 
tool_call_id='mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd', + provider_name='openai', + ), + ), + PartStartEvent( + index=1, + part=BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'tools': [ + { + 'input_schema': { + 'type': 'object', + 'properties': { + 'repoName': { + 'type': 'string', + 'description': 'GitHub repository: owner/repo (e.g. "facebook/react")', + } + }, + 'required': ['repoName'], + 'additionalProperties': False, + '$schema': 'http://json-schema.org/draft-07/schema#', + }, + 'name': 'read_wiki_structure', + 'annotations': {'read_only': False}, + 'description': 'Get a list of documentation topics for a GitHub repository', + }, + { + 'input_schema': { + 'type': 'object', + 'properties': { + 'repoName': { + 'type': 'string', + 'description': 'GitHub repository: owner/repo (e.g. "facebook/react")', + }, + 'question': { + 'type': 'string', + 'description': 'The question to ask about the repository', + }, + }, + 'required': ['repoName', 'question'], + 'additionalProperties': False, + '$schema': 'http://json-schema.org/draft-07/schema#', + }, + 'name': 'ask_question', + 'annotations': {'read_only': False}, + 'description': 'Ask any question about a GitHub repository', + }, + ], + 'error': None, + }, + tool_call_id='mcpl_00b9cc7a23d047270068faa0e29804819fb060cec0408ffbcd', + timestamp=IsDatetime(), + provider_name='openai', + ), + ), + PartStartEvent( + index=3, + part=BuiltinToolCallPart( + tool_name='mcp_server', + tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', + provider_name='openai', + ), + ), + PartDeltaEvent( + index=3, + delta=ToolCallPartDelta( + args_delta='{"action":"call_tool","server_id":"deepwiki","tool_name":"ask_question","tool_args":', + tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', + ), + ), + PartDeltaEvent( + index=3, + delta=ToolCallPartDelta( + args_delta='{"repoName":"pydantic/pydantic-ai","question":"What is the pydantic/pydantic-ai repository about?"}', + tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', + ), + ), + PartDeltaEvent( + index=3, + delta=ToolCallPartDelta( + args_delta='}', tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac' + ), + ), + PartStartEvent( + index=4, + part=BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'error': None, + 'output': """\ +The `pydantic/pydantic-ai` repository is a Python agent framework designed to simplify the development of production-grade applications using Generative AI . It aims to bring the ergonomic developer experience and type-safety philosophy of Pydantic and FastAPI to AI agent development . + +## Core Purpose and Features + +The framework focuses on providing a robust and type-safe environment for building AI agents . Key features include: + +* **Type-safe Agents**: Agents are generic `Agent[Deps, Output]` for compile-time validation, leveraging Pydantic for output validation and dependency injection . +* **Model-agnostic Design**: It supports over 15 LLM providers through a unified `Model` interface, allowing for easy switching between different models and providers . +* **Structured Outputs**: Automatic Pydantic validation and reflection/self-correction ensure structured and reliable outputs from LLMs . +* **Comprehensive Observability**: Integration with OpenTelemetry and native Logfire provides real-time debugging, performance monitoring, and cost tracking . 
+* **Production-ready Tooling**: This includes an evaluation framework (`pydantic-evals`), durable execution capabilities, and various protocol integrations like MCP, A2A, and AG-UI . +* **Graph Support**: It provides a way to define graphs using type hints for complex applications . + +## Framework Architecture + +The framework is structured as a UV workspace monorepo, containing several packages . + +### Core Packages + +* `pydantic-ai-slim`: Contains the core framework components such as `Agent`, `Model`, and tools . +* `pydantic-ai`: A meta-package that includes all optional extras . + +### Supporting Packages + +* `pydantic-graph`: Provides the graph execution engine with `Graph` and `BaseNode` . +* `pydantic-evals`: An evaluation framework for datasets and evaluators . +* `examples`: Contains example applications . +* `clai`: Provides a CLI interface . + +## Agent Execution Flow + +The `Agent` class serves as the primary orchestrator . Agent execution is graph-based, utilizing a state machine from `pydantic_graph.Graph` . The execution involves three core node types: + +* `UserPromptNode`: Processes user input and creates initial `ModelRequest` . +* `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries . +* `CallToolsNode`: Executes tool functions via `RunContext[Deps]` . + +The `Agent` provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution scenarios . + +## Model Provider Support + +The framework offers a unified `Model` abstract base class for various LLM providers . This includes native support for providers like OpenAI, Anthropic, Google, Groq, Mistral, Cohere, and Bedrock . Additionally, many OpenAI-compatible providers can be used with `OpenAIChatModel` . + +## Tool System + +Tools are registered using the `@agent.tool` decorator . The system automatically generates JSON schemas from function signatures and docstrings, validates tool call arguments, and provides context injection via `RunContext[Deps]` . + +## Observability Integration + +Pydantic AI integrates with OpenTelemetry, allowing for instrumentation of agent runs, model requests, and tool executions . It has native integration with Pydantic Logfire for enhanced monitoring and visualization . + +## Evaluation Framework + +The `pydantic-evals` package provides a framework for systematically testing and evaluating AI systems . It supports defining datasets with `Case` objects and using various evaluators, including built-in and custom ones . + +## Integration Ecosystem + +Pydantic AI supports various integrations for development and production: + +* **Model Context Protocol (MCP)**: For external tool server access . +* **AG-UI Protocol**: For interactive application frontends . +* **Agent2Agent (A2A)**: For multi-agent communication and workflows . +* **Temporal**: For durable workflow execution . +* **DBOS**: For database-backed execution and state persistence . + +## Notes + +The `CLAUDE.md` file provides guidance for Claude Code when working with the repository, including development commands and an overview of core components and design patterns . The `mkdocs.yml` file defines the structure and content of the project's documentation, including navigation, plugins, and watch directories for various packages . The `docs/install.md` file details how to install the `pydantic-ai` package and its optional components, including a "slim" installation option for specific model dependencies . 
+ +Wiki pages you might want to explore: +- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1) + +View this search on DeepWiki: https://deepwiki.com/search/what-is-the-pydanticpydanticai_e234e9cf-d4aa-4c67-a257-56034816dd56 +""", + }, + tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', + timestamp=IsDatetime(), + provider_name='openai', + ), + ), + ] + ) + + +async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model_requests: None, openai_api_key: str): + m = OpenAIResponsesModel( + 'o4-mini', + provider=OpenAIProvider(api_key=openai_api_key), + ) + agent = Agent( + m, + instructions='You are a helpful assistant.', + builtin_tools=[ + MCPServerTool( + id='google_calendar', + url='x-openai-connector:connector_googlecalendar', + authorization_token='fake', + description='Google Calendar', + allowed_tools=['search_events'], + ), + ], + ) + + result = await agent.run('What do I have on my Google Calendar for today?') + assert result.all_messages() == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart(content='What do I have on my Google Calendar for today?', timestamp=IsDatetime()) + ], + instructions='You are a helpful assistant.', + ), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='mcp_server', + args={'action': 'list_tools', 'server_id': 'google_calendar'}, + tool_call_id='mcpl_0558010cf1416a490068faa0f9679481a082dc4ac08889f104', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'tools': [ + { + 'input_schema': { + 'properties': { + 'calendar_id': { + 'anyOf': [{'type': 'string'}, {'type': 'null'}], + 'default': None, + 'description': "The ID of the calendar to search. Default one is 'primary'", + 'title': 'Calendar Id', + }, + 'max_results': {'default': 50, 'title': 'Max Results', 'type': 'integer'}, + 'next_page_token': { + 'anyOf': [{'type': 'string'}, {'type': 'null'}], + 'default': None, + 'title': 'Next Page Token', + }, + 'query': { + 'anyOf': [{'type': 'string'}, {'type': 'null'}], + 'default': None, + 'title': 'Query', + }, + 'time_max': { + 'anyOf': [{'type': 'string'}, {'type': 'null'}], + 'default': None, + 'description': "Time in the ISO-8601 format. You can also use 'now' or leave null.", + 'title': 'Time Max', + }, + 'time_min': { + 'anyOf': [{'type': 'string'}, {'type': 'null'}], + 'default': None, + 'description': "Time in the ISO-8601 format. You can also use 'now' or leave null.", + 'title': 'Time Min', + }, + 'timezone_str': { + 'anyOf': [{'type': 'string'}, {'type': 'null'}], + 'default': None, + 'description': "Timezone of the event. 
Default is 'America/Los_Angeles'", + 'title': 'Timezone Str', + }, + }, + 'title': 'search_events_input', + 'type': 'object', + }, + 'name': 'search_events', + 'annotations': {'read_only': True}, + 'description': 'Look up Google Calendar events using various filters.', + } + ], + 'error': None, + }, + tool_call_id='mcpl_0558010cf1416a490068faa0f9679481a082dc4ac08889f104', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_0558010cf1416a490068faa0fb684081a0a0b70f55d8194bb5', + signature='gAAAAABo-qEE669V-_c3vkQAeRtSj9pi72OLJweRJe4IRZkLcFfnuwdxSeJM5DVDLzb3LbfzU0ee6a4KAae0XsETU3hELT1hn3LZPwfFku5zl7CVgsc1DmYBf41Qki1EPHFyIlMj937K8TbppAAqMknfLHHwV1FLb8TapccSEhJbzGutqD3c2519P9f6XHKcuDa8d-sjyUejF0QuSjINFcjifJ8DiU40cL_-K6OJotlx6e0FqOivz6Nlj13QZxQ0I3FiiSi03mYKy240jYMpOpjXr7yPmEXLdCJdP5ycmTiJLxf4Bugww6u4F2uxy22978ACyFGSLHBiQyjczj_can7qKXAkMwYJKcGNjaNi8jG5iTIwsGswRjD1hvY-AGUotMFbPCszX3HW1M_ar-livaheiZauCfKV-Uc1ZeI3gijWEwtWQ0jye29FyQPCCpOBvT6RbUvFEpfqpwcMQuUhOyEfgzli2dpuOAgkSjCPE6ctoxjbYa62YzE-yrXAGc5_ptQy_2vw7t0k3jUzSo2Tv0aKnqvvKcj9SIilkZV4Nf-TL_d2E7d48bBJDlqbAv7fkhhd2YlkLqwdR1MqZtygcR1Jh8p2Y1pFAa4mSj7hh4M-zfSu--6dij2iKIbnKQ4DbXyGpMZXBAqTHMe9PPOwGxWKShlN5a5T89B04d_GwJYBDJx2ctecqZxDMjkTn3wVGl_5wuDnrEgd0I91vmAoYuWldR_h8M_FjDFiHefdbZjw1TxVKjkp6wk6zQiXCvvCZYJa9XkhytcllWvUI4C0gbxHrEzZRy9Vii3buqnbiIM9Qj0VPx-Q-FKM_usZBBmlvmk9PMQ8rH9vVT8dRFNQEj-aqudB5yUcTx8XaUFwYAts04OObGBqXoazYtxh6WvHwrf09pb_g0dwzE_rlcQdYxcFLOpYD-AentRAjOuIr4bLRM9BMERBxPvvPCxZ2Mva8YqV2TIOtxzMY08freim6du1IuYprO6CoejPaBdULhct-nsPubOdjLBikZt_bwumvmqGXnxI_uu51b9HtzPeDpWIjF6pi88bcsOk0qglA9GAu3wwX-iIdaV19VdVCO4KJjxiVrbTY1IVgWSdz98Alb_HzpXsoS6i2PRAjjsYOe4RBX3etxjsY07XXLlmXAM_vuYXc8Y6STxvBk4ST4OkaCvUk9DoZbVL5KmVcT6TaFpbVCOB_eHkHIvMjXc35kzxCdqEMG3FpRzL_UkY8pPridvq2z1Xw0al2KEBvdKPlInB8-zX5ANGeRkMGZ6ZfyX1zCIdYLe3wrC8xqr5nUZ-ueWmtqYLavSg8mQKphp4QyVaiwtbxEt5GEiVG7_LR754mGQYPdr9Shh3ECAp8wmSfDVO8MHaLmzgo3RXeqlqFldRjQzDHtCaGhjD9bHKF3yWF2LtH4gUN-Sf--86lcq7iwHDSDm656P_FBfYmE7rA0svH-m3hQoBhza4CKJ7s7f7ZymEhcHAfH7SPImZ3Y-kT_Sy1mbCCf3Yg8uitrpX7ukO6_bIANS_R4oiOPcuLixbWY0ZSyq8ERB5fa5EsIUm7PpGxbO96nmk5rPkewyB4gCtslwJI0Ye7zHtqrDBz1j1nsjIKsRCfFWlUdRF8J1JPiiBSvP8SraQ_94cnKBCsl34BGsVm-R1_ULbuyahBzSHq2Kwr0XQuNLdGChyLKS_FZVT58kbRFsvjZnbalAZ-k9alMeZ-pdWX5f9nSn3w7fz675zOxnBaqiZmoWHXFNOBVGH7gkz05ynJ2B8j_RpdRNJKXUN8pAvf595HGl2IPdaDhqoeS2_3jixO5mmxZuPEdzopoBFRarWud99mxH-mYxWJzKiA1pLNqj7SO93p2-jB-jtsCfZfk6bVEWpRRkIEz0XvxffFTVuGUCqpGS7FiFZc4pQU24pCrdpg2w3xeDSrmfHDAx2vUvv0iRBnQxTTWx2-de2TQQTpR5tjFNyOhYGVn1OXqkbkNtIUHdnNGA1QBCU0Qs0471Ss1CrxXIeeNVSTd00jiu4_ELk6nJYgSpmS8G_crrDza8mRLV5Yk0ItRrZj6pwKUOEaYeyM-RHyhrjf09yaf7Qc3sAozQF0aXFCQjSYiVb98DuGH28HLUxW9ulmSKKR4pYKlCOLNGm0h_gWCpSa0H1HXCgEoPn68HyaJogv_xH3k4ERYyJnxu8zVbVPMGoa9q9nNRQQ9Ks2AvxYRQeGFSCTACBmuookvHsO1zjYfHNuSCD7pCLRFE76KlmSiAX6l9LNOq_xe9Oos-1AvcZHkmVsuh-mjTVkBOjG6zmnHiNJirBpORs_UWL5lmlQBeaXgdHxcb4tHIn8XYXFkQiC4b4pw==', + provider_name='openai', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args={ + 'action': 'call_tool', + 'server_id': 'google_calendar', + 'tool_name': 'search_events', + 'tool_args': { + 'time_min': '2025-10-23T00:00:00', + 'time_max': '2025-10-23T23:59:59', + 'timezone_str': 'America/Los_Angeles', + 'max_results': 50, + 'query': None, + 'calendar_id': 'primary', + 'next_page_token': None, + }, + }, + tool_call_id='mcp_0558010cf1416a490068faa0fdf64481a085a3e5b8f7d6559a', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'output': None, + 'error': { + 'code': 500, + 'message': 'An unknown error occurred while 
executing the tool.', + 'type': 'http_error', + }, + }, + tool_call_id='mcp_0558010cf1416a490068faa0fdf64481a085a3e5b8f7d6559a', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_0558010cf1416a490068faa0ff5c9081a0b156a84d46e5d787', + signature='gAAAAABo-qEE72KCH4RlulMdH6cOTaOQwFy4of4dPd8YlZ-zF9MIsPbumWO2qYlZdGjIIXDJTrlRh_5FJv2LtTmMbdbbECA20AzFMwE4pfNd2aNLC5RhcHKa4M9acC1wYKAddqEOPP7ETVNBj-GMx-tMT_CY8XnBLWvSwPpcfde9E--kSrfsgvRn1umqDsao4sLlAtV-9Gc6hmW1P9CSJDQbHWkdTKMV-cjQ-wZHFCly5kSdIW4OKluFuFRPkrXs7kVmlGnMr8-Q5Zuu1ZOFR9mPvpu2JdxAFohjioM-ftjeBuBWVJvOrIF4nV-yIVHVT-_psAZaPUUB5cyPAtqpoxxIV3iPKPU8DHctP03g_0R6pSWWHhggvO5PBw3zyPwtBwOrHBipc4nQEWEMxZxLH5SYJauTKwHNOx9NyCq8JUjZXM_v4xsGxNa4cAp7GuXqR2YyW2sx7syRUiDwtebh0xk_YOQtkv8tAjzCofmaz3n8FJ2nGSXkilaV5Q8LUNO-9-D2tsAaScDVMuLMMAHFNp_GPplWrmGES4mTCNtTXWyF1GLcQBw8dYYctV66Ocy2_zxyDoB7SsR5htlV77nJ6u1Hbp3tk26LutDrhAhe55xcki8iblHbXNY9MRzR1SS5Zk3-dv0ex4QOzC663NvS9aK3olQbKYko5TvM7Pq4MFYfaxwFTVFVEdaskoDJieVyikz0ZzBjTsItIwL-Q2BVN2F_P_wgCV5hyDclNMPEGTMxajxfIFv-oEunmHY1_RJavl47iXWS8H3JWAvp-9YYQdTS4Aa6m5zPndvHOvEV355UawLHRPctHFUS7rE7rYmcU6KQaqC96JRM0KRfXNIgYtNfw6cxgnyqGxzTF7qeeVzObOqoQmz59Rh0U9ti37vqHb8Ca43-q2Gx2KaVZFj7MBQK8UodfaDRIEuyMB3XNfckxCefwHs7FeAj5NuNDBrm0uDcwJjs2JfY2i54gAES8kAPLGJgRpq_qdjVXqpO6W0H9E1vBdRem7zLPYbA8OOo-KCkRW4AFCVbgCpgIvo4GDNvFOMksl-d8zgQU2qroUWJRu58j1bdaar7Zlfxk0UR33nROmJpXGb_R-RCNAN1ZxJTdEU_dVfyLCeuIXPsnO-FlfO8J6Un3WWPNLuN_bDS5RocniI_ms71qLsisJQiPTs-JDFl-eMM2Hk3QqSCC6OT0CLG9XMmI_zva9yp2joQ8HdGMddE3FDCbLejRrx8fV-9Nd0tZ7SYjFG78_fre8IfL0L67CK1JIPYzhgRZgCb-FFwUy-stR_BstIn0sRr_tDCoHdxuoVCh0dZfTY1p27xbKQ50svHxp1caNp3uze0wLXP9STNouFjFpdIHMsDRaGfO9R9mMmUsFcmBMK3aikuHTpebyL1CeZsIzH2cbZLPRx3pN2IqJ-5h6-cORHuMqf3ysEEFCjXnqmzvWPuBjYDsxnxA1awaGkYKsKhqchgakrfplOjdG5tSkklggBJA93iRaUWIR-4oV6HkkrnpdK1w7BL_VT8upqZmkpHZtZCDSgINk5S5hoYPLBTtS3dcCmQIbLvPXPuGzdAZxl0bhD4Rm3GPDFszaDoFK0Jszcjlaf4SJqyZABKEf71dDbi1as-2Qwr4fxBiQIOsF8ChbYo6Z2iFtUpBnbruFUIwB5QyKfWnwEZbOgf4UbIvIqNMkTzMc8tJgz6Ddqfih8VeNH3v8_84J6vHU0SVm_gvkgQ6P6N_6r5LwNdlAEff0hFwn-aTHWZ3s8MICckUZj97lKoZxAl91WlsKa0yrLw24dxvJ6bhZf0FsOitUJGd7vFPx0TxSobUkzE2RrbQ3hziPxw2Gins4aI6YG3M1gfumd3MgdH-fYBvZulJ9vmw0ZC1Dqh6BkCWHOFKsnpQvHmYuyTzUmnYuJf8N5j_b9XNw0krmxouOCPQClFmIOBLw8XPbe3xf0F5JP7BC0PpjlPT33A5Z6Za5zlA5O-DE_Wp0WG885-GaKtZI-zBZW3R0lc9A4s0HbxqA3lqH8leXOCe6WO46Z_iTQlALpTR-7oaHqzTegq0KSmEjCFO-jLSrVZnBOQ4ddTvLj4ASsQbj-o6TFUFVZAKSLI3FtWovHw02Gc_D0luFz9TbfaXM-EapEQYajkG0_b_nSCoPq0T9HSyvU4oCxXyQvhwIgzbijR-BheN6a_l6hiqZCw9L1c8MdPRtjpbHtEwWkpQ62s8XdydeJnV5vJYp9ezBbS_vWQ7Nz1siai6epJTdzDkRm-dudVhKzdohwg-FOQ-5gSrvoPS_MF4lZvah3iXY1g4uePO4eNDWGJ74YPybiy', + provider_name='openai', + ), + BuiltinToolCallPart( + tool_name='mcp_server', + args={ + 'action': 'call_tool', + 'server_id': 'google_calendar', + 'tool_name': 'search_events', + 'tool_args': { + 'time_min': '2025-10-23T00:00:00Z', + 'time_max': '2025-10-23T23:59:59Z', + 'timezone_str': None, + 'max_results': 50, + 'query': None, + 'calendar_id': 'primary', + 'next_page_token': None, + }, + }, + tool_call_id='mcp_0558010cf1416a490068faa102400481a09fa35e84bc26c170', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='mcp_server', + content={ + 'output': None, + 'error': { + 'code': 500, + 'message': 'An unknown error occurred while executing the tool.', + 'type': 'http_error', + }, + }, + tool_call_id='mcp_0558010cf1416a490068faa102400481a09fa35e84bc26c170', + timestamp=IsDatetime(), + provider_name='openai', + ), + ThinkingPart( + content='', + id='rs_0558010cf1416a490068faa102d89481a0b74cca04bcb8f127', + 
signature='gAAAAABo-qEECuiSxfvrR92v1hkqyCTCWyfmpHSaW-vVouk5mOTIFDvaBZdVTFH8-dJfpwEG3MCejRKh9V-I8mrYAjhudVr1ayHo8UYOOU1cfVc6w3wsrkL8hXljjE-amiJhBSjvRc2nwwGtgYpDxOfWTqJkaUvFnMD6MrS4CwMrCBbDOLYZgM1cQbidtrrtpP7D5u42tR6coC_PCOqwPzDN4f0RggrxVxh0038p81VUmlkUeA2jWzRyFpeDGRjXFk84Og73rXAp7EWQv7TmzgVXBjCVwwzJNU8HCZ_gkwh5dvL94QxBx32lEmfOOKcqA3hN3FLwDqXlZ8f7jEqYInnpILQgX5XMdM9OrCyXmDCr_eIy00cjvxnTcXhCnZBOaKCKmTP74yUpGNdLbQcr4BalTiviNYEeCAhJyRo4KnhUZbBoT7MB5NULf-kqhRo1gEGKjWiLdV47PhR7Z8i4BK7zBceganMKpLtzIMW5a6JAujC4Z9FYxcpJZI_CD9NHsPr4SjKgIwv89d6BYo89-xfflF6ZUZBkuDUnL2-Nc9CKgGuKlcDunvYLr38pzA278OFYzh9T42u4SbS8KkSXKjGU3H8LfpMnBEZigriixLt5vj7qnWmZvCFarzxT4U4qqR1ITp5rkO6G9kYvBEfS7wu768mteDBgAajUaeOMQEfjJRErC4wfzbB89YCsXPJz0JE90QZ5LeiP5ZlVezTTaddG9JmiGsBCPckqUb1LWdpvekCfPkePF_uDMVWyJpQ4ZBzQsZx8sHf5spygsiQjlzTiriqwhoTcPuXoONoCr9HeFX1Qy8SGOm87siRPAD7FHJdDxbJwq8tOlMpx8MH1dqEY07lwoxZB0GQ9XbB7QJXfQR_27nkpqBYFkrbqChNJLO2x8gNFClbB0mgYQE1CRy64y6yOrG3CtS53RK5VGrF1GnqwuWdZ452VgShT5nAmPFRlRk1S9px4eMUTAozT0QAYrlHQC7b6I6K3m_Qe3kXGpnn_87i2eGG8mHmXG2FvFChkgf2OU7-LRy_Wl_u-ataICeoBwfngBFMppvUW6tJP009HK7mUE8P1KJntN3ExKLIBhmKhV6ziBpIi1bSTmd8leYqfSaf648c7-sVuDRx7DzxTp19l3fwVFa67GdiagZFs7xaU1HxMnMc3uy5VKWAH_qcv-Mga3VCTtTPpMTjvB95nsLeOFjS2FtpPvaP0N6o5kkkzW7cteWpOHhSX0z7AQA7CqgOCQLfLUc7ltVxnOH4WdHoeZFah_q_Ue6caf0kNo4YsTfbRDdzsW70o8P5Agr-Pgttg19vTDA_eBFur9GDKIRT0vYMWPpykwJBDTgJKOFW6uyNkqNWk_RAAvleE9pAyOoSmgomyrMcnnpdeYHNxeNxvTWFC3mcKSjJIB316wypPvaGTJyaK_pxJScD7CtLrIPkgwPpOsJnDySF6wGe-fGsUMt3zxJrc-S6fp24mYVfTRZbjUsP0fJgLmCohJiAtEg_xvlQ8sPyuLoLdOdossTQ7ufl0CwVn4f_ol4q__gpTvYVaoGsWl3QmHul5zj7OUAn7of6iBfCSlXbrauJvMyNYt4x_dLM8SXTRNPe-ZMDmER9DOw0KJXcUrpl6uw4TphKmUOK6KrxqshujXdN9VDgOwD7eKqIHpvC_6a2R6sS6ZHcebmh2o3bic-Hctomrbv03OQ==', + provider_name='openai', + ), + TextPart( + content=IsStr(), + id='msg_0558010cf1416a490068faa103e6c481a0930eda4f04bb3f2a', + ), + ], + usage=RequestUsage(input_tokens=1065, output_tokens=760, details={'reasoning_tokens': 576}), + model_name='o4-mini-2025-04-16', + timestamp=IsDatetime(), + provider_name='openai', + provider_details={'finish_reason': 'completed'}, + provider_response_id='resp_0558010cf1416a490068faa0f945bc81a0b6a6dfb7391030d5', + finish_reason='stop', + ), + ] + ) diff --git a/tests/test_examples.py b/tests/test_examples.py index 12b0c745ad..90ad8bb73e 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -339,6 +339,7 @@ async def call_tool( 'What was his most famous equation?': "Albert Einstein's most famous equation is (E = mc^2).", 'What is the date?': 'Hello Frank, the date today is 2032-01-02.', 'What is this? https://ai.pydantic.dev': 'A Python agent framework for building Generative AI applications.', + 'Give me some examples of my products.': 'Here are some examples of my data: Pen, Paper, Pencil.', 'Put my money on square eighteen': ToolCallPart( tool_name='roulette_wheel', args={'square': 18}, tool_call_id='pyd_ai_tool_call_id' ), @@ -519,6 +520,8 @@ async def call_tool( ), 'Remember that I live in Mexico City': "Got it! I've recorded that you live in Mexico City. I'll remember this for future reference.", 'Where do I live?': 'You live in Mexico City.', + 'Tell me about the pydantic/pydantic-ai repo.': 'The pydantic/pydantic-ai repo is a Python agent framework for building Generative AI applications.', + 'What do I have on my calendar today?': "You're going to spend all day playing with Pydantic AI.", } tool_responses: dict[tuple[str, str], str] = {