diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index eb4f123f42..0dafc1266f 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -97,7 +97,7 @@ jobs: id: azure-functions-setup - name: Test with pytest timeout-minutes: 10 - run: uv run poe all-tests -n logical --dist loadfile --dist worksteal --timeout 300 --retries 3 --retry-delay 10 + run: uv run poe all-tests -n logical --dist loadfile --dist worksteal --timeout 600 --retries 3 --retry-delay 10 working-directory: ./python - name: Test core samples timeout-minutes: 10 diff --git a/docs/decisions/0011-python-typeddict-options.md b/docs/decisions/0011-python-typeddict-options.md new file mode 100644 index 0000000000..09657b2cfb --- /dev/null +++ b/docs/decisions/0011-python-typeddict-options.md @@ -0,0 +1,129 @@ +--- +# These are optional elements. Feel free to remove any of them. +status: proposed +contact: eavanvalkenburg +date: 2026-01-08 +deciders: eavanvalkenburg, markwallace-microsoft, sphenry, alliscode, johanst, brettcannon +consulted: taochenosu, moonbox3, dmytrostruk, giles17 +--- + +# Leveraging TypedDict and Generic Options in Python Chat Clients + +## Context and Problem Statement + +The Agent Framework Python SDK provides multiple chat client implementations for different providers (OpenAI, Anthropic, Azure AI, Bedrock, Ollama, etc.). Each provider has unique configuration options beyond the common parameters defined in `ChatOptions`. Currently, developers using these clients lack type safety and IDE autocompletion for provider-specific options, leading to runtime errors and a poor developer experience. + +How can we provide type-safe, discoverable options for each chat client while maintaining a consistent API across all implementations? + +## Decision Drivers + +- **Type Safety**: Developers should get compile-time/static analysis errors when using invalid options +- **IDE Support**: Full autocompletion and inline documentation for all available options +- **Extensibility**: Users should be able to define custom options that extend provider-specific options +- **Consistency**: All chat clients should follow the same pattern for options handling +- **Provider Flexibility**: Each provider can expose its unique options without affecting the common interface + +## Considered Options + +- **Option 1: Status Quo - Class `ChatOptions` with `**kwargs`** +- **Option 2: TypedDict with Generic Type Parameters** + +### Option 1: Status Quo - Class `ChatOptions` with `**kwargs` + +The current approach uses a base `ChatOptions` Class with common parameters, and provider-specific options are passed via `**kwargs` or loosely typed dictionaries. + +```python +# Current usage - no type safety for provider-specific options +response = await client.get_response( + messages=messages, + temperature=0.7, + top_k=40, + random=42, # No validation +) +``` + +**Pros:** +- Simple implementation +- Maximum flexibility + +**Cons:** +- No type checking for provider-specific options +- No IDE autocompletion for available options +- Runtime errors for typos or invalid options +- Documentation must be consulted for each provider + +### Option 2: TypedDict with Generic Type Parameters (Chosen) + +Each chat client is parameterized with a TypeVar bound to a provider-specific `TypedDict` that extends `ChatOptions`. This enables full type safety and IDE support. 
+
+```python
+# Provider-specific TypedDict
+class AnthropicChatOptions(ChatOptions, total=False):
+    """Anthropic-specific chat options."""
+    top_k: int
+    thinking: ThinkingConfig
+    # ... other Anthropic-specific options
+
+# Generic chat client
+class AnthropicChatClient(ChatClientBase[TAnthropicChatOptions]):
+    ...
+
+client = AnthropicChatClient(...)
+
+# Usage with full type safety
+response = await client.get_response(
+    messages=messages,
+    options={
+        "temperature": 0.7,
+        "top_k": 40,
+        "random": 42,  # fails type checking and IDE would flag this
+    }
+)
+
+# Users can extend for custom options
+class MyAnthropicOptions(AnthropicChatOptions, total=False):
+    custom_field: str
+
+
+client = AnthropicChatClient[MyAnthropicOptions](...)
+
+# Usage of custom options with full type safety
+response = await client.get_response(
+    messages=messages,
+    options={
+        "temperature": 0.7,
+        "top_k": 40,
+        "custom_field": "value",
+    }
+)
+
+```
+
+**Pros:**
+- Full type safety with static analysis
+- IDE autocompletion for all options
+- Compile-time error detection
+- Self-documenting through type hints
+- Users can extend options for their specific needs or for advances in models
+
+**Cons:**
+- More complex implementation
+- Some `type: ignore` comments are needed for TypedDict field overrides
+- Minor: Requires TypeVar with default (Python 3.13+ or typing_extensions)
+
+> [!NOTE]
+> In .NET this is already achieved through overloads on the `GetResponseAsync` method for each provider-specific options class, e.g., `AnthropicChatOptions`, `OpenAIChatOptions`, etc., so this does not apply to .NET.
+
+### Implementation Details
+
+1. **Base Protocol**: `ChatClientProtocol[TOptions]` is generic over the options type, with the default set to `ChatOptions` (the new TypedDict)
+2. **Provider TypedDicts**: Each provider defines its options by extending `ChatOptions`
+   Providers can even override fields with type `None` to indicate they are not supported.
+3. **TypeVar Pattern**: `TProviderOptions = TypeVar("TProviderOptions", bound=TypedDict, default=ProviderChatOptions, contravariant=True)`
+4. **Option Translation**: Common options are kept in place, and each options class documents explicitly how they are used; provider-specific translation (e.g., `user` → `metadata.user_id` in `_prepare_options` for Anthropic) preserves easy use of the common options.
+
+## Decision Outcome
+
+Chosen option: **"Option 2: TypedDict with Generic Type Parameters"**, because it provides full type safety, excellent IDE support with autocompletion, and allows users to extend provider-specific options for their use cases. This generic was also extended to `ChatAgent` so that the options used in agent construction and run methods are properly typed.
+
+See [typed_options.py](../../python/samples/getting_started/chat_client/typed_options.py) for a complete example demonstrating the usage of typed options with custom extensions.
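+
+For reference, a minimal sketch of implementation details 3 and 4 above (the TypeVar pattern and common-option translation). This is illustrative only and assumes the names used earlier in this document (`ChatOptions` as the base TypedDict, a per-provider `_prepare_options` hook); the actual implementation may differ:
+
+```python
+import sys
+from typing import Any, TypedDict
+
+if sys.version_info >= (3, 13):
+    from typing import TypeVar
+else:
+    from typing_extensions import TypeVar  # TypeVar with `default` support (PEP 696)
+
+
+class ChatOptions(TypedDict, total=False):
+    """Common options shared by all providers (abbreviated for illustration)."""
+
+    temperature: float
+    user: str
+
+
+class AnthropicChatOptions(ChatOptions, total=False):
+    """Anthropic-specific options extending the common set (abbreviated)."""
+
+    top_k: int
+
+
+# Implementation detail 3: TypeVar bound to TypedDict with a provider-specific default
+TAnthropicChatOptions = TypeVar(
+    "TAnthropicChatOptions",
+    bound=TypedDict,  # type: ignore[valid-type]
+    default=AnthropicChatOptions,
+    contravariant=True,
+)
+
+
+def _prepare_options(options: dict[str, Any]) -> dict[str, Any]:
+    """Implementation detail 4: translate common options into the provider's shape."""
+    prepared = dict(options)
+    # Example translation for Anthropic: `user` -> `metadata.user_id`.
+    user = prepared.pop("user", None)
+    if user is not None:
+        prepared.setdefault("metadata", {})["user_id"] = user
+    return prepared
+```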
diff --git a/python/.cspell.json b/python/.cspell.json index 3fea304d38..73588b3b35 100644 --- a/python/.cspell.json +++ b/python/.cspell.json @@ -25,6 +25,7 @@ "words": [ "aeiou", "aiplatform", + "agui", "azuredocindex", "azuredocs", "azurefunctions", diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index cd85509a40..2bdc5c6198 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -189,7 +189,7 @@ async def __aexit__( async def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -216,7 +216,7 @@ async def run( async def run_stream( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, diff --git a/python/packages/ag-ui/agent_framework_ag_ui/__init__.py b/python/packages/ag-ui/agent_framework_ag_ui/__init__.py index 631168067c..c6dc575d36 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/__init__.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/__init__.py @@ -16,7 +16,7 @@ from ._endpoint import add_agent_framework_fastapi_endpoint from ._event_converters import AGUIEventConverter from ._http_service import AGUIHttpService -from ._types import AGUIRequest +from ._types import AgentState, AGUIChatOptions, AGUIRequest, PredictStateConfig, RunMetadata try: __version__ = importlib.metadata.version(__name__) @@ -30,11 +30,15 @@ "AgentFrameworkAgent", "add_agent_framework_fastapi_endpoint", "AGUIChatClient", + "AGUIChatOptions", "AGUIEventConverter", "AGUIHttpService", "AGUIRequest", + "AgentState", "ConfirmationStrategy", "DefaultConfirmationStrategy", + "PredictStateConfig", + "RunMetadata", "TaskPlannerConfirmationStrategy", "RecipeConfirmationStrategy", "DocumentWriterConfirmationStrategy", diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py index db2f160a9d..e31036803c 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_client.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py @@ -4,17 +4,17 @@ import json import logging +import sys import uuid from collections.abc import AsyncIterable, MutableSequence from functools import wraps -from typing import Any, TypeVar, cast +from typing import TYPE_CHECKING, Any, Generic, cast import httpx from agent_framework import ( AIFunction, BaseChatClient, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, DataContent, @@ -30,6 +30,26 @@ from ._message_adapters import agent_framework_messages_to_agui from ._utils import convert_tools_to_agui_format +if TYPE_CHECKING: + from ._types import AGUIChatOptions + +from typing import TypedDict + +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + +if sys.version_info >= (3, 11): + from typing import Self # pragma: no cover +else: + from typing_extensions import Self # pragma: no cover + logger: logging.Logger = logging.getLogger(__name__) @@ -55,7 +75,14 @@ def _unwrap_server_function_call_contents(contents: 
MutableSequence[Contents | d contents[idx] = content.function_call_content # type: ignore[assignment] -TBaseChatClient = TypeVar("TBaseChatClient", bound=type[BaseChatClient]) +TBaseChatClient = TypeVar("TBaseChatClient", bound=type[BaseChatClient[Any]]) + +TAGUIChatOptions = TypeVar( + "TAGUIChatOptions", + bound=TypedDict, # type: ignore[valid-type] + default="AGUIChatOptions", + covariant=True, +) def _apply_server_function_call_unwrap(chat_client: TBaseChatClient) -> TBaseChatClient: @@ -91,7 +118,7 @@ async def response_wrapper(self, *args: Any, **kwargs: Any) -> ChatResponse: @use_function_invocation @use_instrumentation @use_chat_middleware -class AGUIChatClient(BaseChatClient): +class AGUIChatClient(BaseChatClient[TAGUIChatOptions], Generic[TAGUIChatOptions]): """Chat client for communicating with AG-UI compliant servers. This client implements the BaseChatClient interface and automatically handles: @@ -168,6 +195,19 @@ class AGUIChatClient(BaseChatClient): async with AGUIChatClient(endpoint="http://localhost:8888/") as client: response = await client.get_response("Hello!") print(response.messages[0].text) + + Using custom ChatOptions with type safety: + + .. code-block:: python + + from typing import TypedDict + from agent_framework_ag_ui import AGUIChatClient, AGUIChatOptions + + class MyOptions(AGUIChatOptions, total=False): + my_custom_option: str + + client: AGUIChatClient[MyOptions] = AGUIChatClient(endpoint="http://localhost:8888/") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ OTEL_PROVIDER_NAME = "agui" @@ -201,7 +241,7 @@ async def close(self) -> None: """Close the HTTP client.""" await self._http_service.close() - async def __aenter__(self) -> "AGUIChatClient": + async def __aenter__(self) -> Self: """Enter async context manager.""" return self @@ -280,36 +320,38 @@ def _convert_messages_to_agui_format(self, messages: list[ChatMessage]) -> list[ """ return agent_framework_messages_to_agui(messages) - def _get_thread_id(self, chat_options: ChatOptions) -> str: + def _get_thread_id(self, options: dict[str, Any]) -> str: """Get or generate thread ID from chat options. Args: - chat_options: Chat options containing metadata + options: Chat options containing metadata Returns: Thread ID string """ thread_id = None - if chat_options.metadata: - thread_id = chat_options.metadata.get("thread_id") + metadata = options.get("metadata") + if metadata: + thread_id = metadata.get("thread_id") if not thread_id: thread_id = f"thread_{uuid.uuid4().hex}" return thread_id + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: """Internal method to get non-streaming response. Keyword Args: messages: List of chat messages - chat_options: Chat options for the request + options: Chat options for the request **kwargs: Additional keyword arguments Returns: @@ -318,23 +360,24 @@ async def _inner_get_response( return await ChatResponse.from_chat_response_generator( self._inner_get_streaming_response( messages=messages, - chat_options=chat_options, + options=options, **kwargs, ) ) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: """Internal method to get streaming response. 
Keyword Args: messages: List of chat messages - chat_options: Chat options for the request + options: Chat options for the request **kwargs: Additional keyword arguments Yields: @@ -342,20 +385,21 @@ async def _inner_get_streaming_response( """ messages_to_send, state = self._extract_state_from_messages(messages) - thread_id = self._get_thread_id(chat_options) + thread_id = self._get_thread_id(options) run_id = f"run_{uuid.uuid4().hex}" agui_messages = self._convert_messages_to_agui_format(messages_to_send) # Send client tools to server so LLM knows about them # Client tools execute via ChatAgent's @use_function_invocation wrapper - agui_tools = convert_tools_to_agui_format(chat_options.tools) + agui_tools = convert_tools_to_agui_format(options.get("tools")) # Build set of client tool names (matches .NET clientToolSet) # Used to distinguish client vs server tools in response stream client_tool_set: set[str] = set() - if chat_options.tools: - for tool in chat_options.tools: + tools = options.get("tools") + if tools: + for tool in tools: if hasattr(tool, "name"): client_tool_set.add(tool.name) # type: ignore[arg-type] self._last_client_tool_set = client_tool_set # type: ignore[attr-defined] diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py index 977c276627..d88152beb5 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py @@ -13,7 +13,7 @@ def collect_server_tools(agent: Any) -> list[Any]: """Collect server tools from ChatAgent or duck-typed agent.""" if isinstance(agent, ChatAgent): - tools_from_agent = agent.chat_options.tools + tools_from_agent = agent.default_options.get("tools") server_tools = list(tools_from_agent) if tools_from_agent else [] logger.info(f"[TOOLS] Agent has {len(server_tools)} configured tools") for tool in server_tools: @@ -23,9 +23,11 @@ def collect_server_tools(agent: Any) -> list[Any]: return server_tools try: - chat_options_attr = getattr(agent, "chat_options", None) - if chat_options_attr is not None: - return getattr(chat_options_attr, "tools", None) or [] + default_options_attr = getattr(agent, "default_options", None) + if default_options_attr is not None: + if isinstance(default_options_attr, dict): + return default_options_attr.get("tools") or [] + return getattr(default_options_attr, "tools", None) or [] except AttributeError: return [] return [] diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py index 3067e3e4a7..feba6f8a29 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py @@ -319,7 +319,7 @@ async def run( response_format = None if isinstance(context.agent, ChatAgent): - response_format = context.agent.chat_options.response_format + response_format = context.agent.default_options.get("response_format") skip_text_content = response_format is not None client_tools = convert_agui_tools_to_agent_framework(context.input_data.get("tools")) @@ -434,10 +434,10 @@ async def run( run_kwargs: dict[str, Any] = { "thread": thread, "tools": tools_param, - "metadata": safe_metadata, + "options": {"metadata": safe_metadata}, } if safe_metadata: - run_kwargs["store"] = True + run_kwargs["options"]["store"] = True async def _resolve_approval_responses( messages: list[Any], diff --git 
a/python/packages/ag-ui/agent_framework_ag_ui/_types.py b/python/packages/ag-ui/agent_framework_ag_ui/_types.py index b404f6d57e..226abae692 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_types.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_types.py @@ -2,8 +2,23 @@ """Type definitions for AG-UI integration.""" +import sys from typing import Any, TypedDict +from agent_framework import ChatOptions + +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + +__all__ = [ + "AGUIChatOptions", + "AgentState", + "PredictStateConfig", + "RunMetadata", +] + from pydantic import BaseModel, Field @@ -48,3 +63,76 @@ class AGUIRequest(BaseModel): None, description="Optional shared state for agentic generative UI", ) + + +# region AG-UI Chat Options TypedDict + + +class AGUIChatOptions(ChatOptions, total=False): + """AG-UI protocol-specific chat options dict. + + Extends base ChatOptions for the AG-UI (Agent-UI) protocol. + AG-UI is a streaming protocol for connecting AI agents to user interfaces. + Options are forwarded to the remote AG-UI server. + + See: https://github.com/ag-ui/ag-ui-protocol + + Keys: + # Inherited from ChatOptions (forwarded to remote server): + model_id: The model identifier (forwarded as-is to server). + temperature: Sampling temperature. + top_p: Nucleus sampling parameter. + max_tokens: Maximum tokens to generate. + stop: Stop sequences. + tools: List of tools - sent to server so LLM knows about client tools. + Server executes its own tools; client tools execute locally via + @use_function_invocation middleware. + tool_choice: How the model should use tools. + metadata: Metadata dict containing thread_id for conversation continuity. + + # Options with limited support (depends on remote server): + frequency_penalty: Forwarded if remote server supports it. + presence_penalty: Forwarded if remote server supports it. + seed: Forwarded if remote server supports it. + response_format: Forwarded if remote server supports it. + logit_bias: Forwarded if remote server supports it. + user: Forwarded if remote server supports it. + + # Options not typically used in AG-UI: + store: Not applicable for AG-UI protocol. + allow_multiple_tool_calls: Handled by underlying server. + + # AG-UI-specific options: + forward_props: Additional properties to forward to the AG-UI server. + Useful for passing custom parameters to specific server implementations. + context: Shared context/state to send to the server. + + Note: + AG-UI is a protocol bridge - actual option support depends on the + remote server implementation. The client sends all options to the + server, which decides how to handle them. 
+ + Thread ID management: + - Pass ``thread_id`` in ``metadata`` to maintain conversation continuity + - If not provided, a new thread ID is auto-generated + """ + + # AG-UI-specific options + forward_props: dict[str, Any] + """Additional properties to forward to the AG-UI server.""" + + context: dict[str, Any] + """Shared context/state to send to the server.""" + + # ChatOptions fields not applicable for AG-UI + store: None # type: ignore[misc] + """Not applicable for AG-UI protocol.""" + + +AGUI_OPTION_TRANSLATIONS: dict[str, str] = {} +"""Maps ChatOptions keys to AG-UI parameter names (protocol uses standard names).""" + +TAGUIChatOptions = TypeVar("TAGUIChatOptions", bound=TypedDict, default="AGUIChatOptions", covariant=True) # type: ignore[valid-type] + + +# endregion diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py index ab7a3533cd..dbfdab5272 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py @@ -3,6 +3,7 @@ """Human-in-the-loop agent demonstrating step customization (Feature 5).""" from enum import Enum +from typing import Any from agent_framework import ChatAgent, ChatClientProtocol, ai_function from pydantic import BaseModel, Field @@ -42,7 +43,7 @@ def generate_task_steps(steps: list[TaskStep]) -> str: return f"Generated {len(steps)} execution steps for the task." -def human_in_the_loop_agent(chat_client: ChatClientProtocol) -> ChatAgent: +def human_in_the_loop_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: """Create a human-in-the-loop agent using tool-based approach for predictive state. Args: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py index 051937f2a9..05c42efb30 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py @@ -3,6 +3,7 @@ """Recipe agent example demonstrating shared state management (Feature 3).""" from enum import Enum +from typing import Any from agent_framework import ChatAgent, ChatClientProtocol, ai_function from agent_framework.ag_ui import AgentFrameworkAgent, RecipeConfirmationStrategy @@ -101,7 +102,7 @@ def update_recipe(recipe: Recipe) -> str: """ -def recipe_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: +def recipe_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: """Create a recipe agent with streaming state updates. 
Args: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py index ad5c4f425c..52515bc0a4 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py @@ -3,6 +3,7 @@ """Example agent demonstrating agentic generative UI with custom events during execution.""" import asyncio +from typing import Any from agent_framework import ChatAgent, ChatClientProtocol, ai_function from agent_framework.ag_ui import AgentFrameworkAgent @@ -87,8 +88,8 @@ async def analyze_data(dataset: str) -> str: ) -def research_assistant_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: - """Create a research assistant agent with progress events. +def research_assistant_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: + """Create a research assistant agent. Args: chat_client: The chat client to use for the agent diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py index e4bffaea0d..3e72fd3a11 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py @@ -2,10 +2,12 @@ """Simple agentic chat example (Feature 1: Agentic Chat).""" +from typing import Any + from agent_framework import ChatAgent, ChatClientProtocol -def simple_agent(chat_client: ChatClientProtocol) -> ChatAgent: +def simple_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: """Create a simple chat agent. Args: @@ -14,7 +16,7 @@ def simple_agent(chat_client: ChatClientProtocol) -> ChatAgent: Returns: A configured ChatAgent instance """ - return ChatAgent( + return ChatAgent[Any]( name="simple_chat_agent", instructions="You are a helpful assistant. Be concise and friendly.", chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py index 6609f06aa6..c79c36f511 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py @@ -2,6 +2,8 @@ """Example agent demonstrating human-in-the-loop with function approvals.""" +from typing import Any + from agent_framework import ChatAgent, ChatClientProtocol, ai_function from agent_framework.ag_ui import AgentFrameworkAgent, TaskPlannerConfirmationStrategy @@ -59,7 +61,7 @@ def book_meeting_room(room_name: str, date: str, start_time: str, end_time: str) ) -def task_planner_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: +def task_planner_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: """Create a task planner agent with user approval for actions. 
Args: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py index 567dd348b4..572df2720b 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py @@ -52,7 +52,7 @@ def generate_task_steps(steps: list[TaskStep]) -> str: return "Steps generated." -def _create_task_steps_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: +def _create_task_steps_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: """Create the task steps agent using tool-based approach for streaming. Args: @@ -61,7 +61,7 @@ def _create_task_steps_agent(chat_client: ChatClientProtocol) -> AgentFrameworkA Returns: A configured AgentFrameworkAgent instance """ - agent = ChatAgent( + agent = ChatAgent[Any]( name="task_steps_agent", instructions="""You are a helpful assistant that breaks down tasks into actionable steps. @@ -331,7 +331,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non yield run_finished_event -def task_steps_agent_wrapped(chat_client: ChatClientProtocol) -> TaskStepsAgentWithExecution: +def task_steps_agent_wrapped(chat_client: ChatClientProtocol[Any]) -> TaskStepsAgentWithExecution: """Create a task steps agent with execution simulation. Args: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py index 0a99e6f1a1..db1788fd25 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py @@ -2,11 +2,17 @@ """Example agent demonstrating Tool-based Generative UI (Feature 5).""" -from typing import Any +import sys +from typing import Any, TypedDict -from agent_framework import AIFunction, ChatAgent, ChatClientProtocol +from agent_framework import AIFunction, ChatAgent, ChatClientProtocol, ChatOptions from agent_framework.ag_ui import AgentFrameworkAgent +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover + # Declaration-only tools (func=None) - actual rendering happens on the client side generate_haiku = AIFunction[Any, str]( name="generate_haiku", @@ -150,15 +156,17 @@ For other requests, use the appropriate tool (create_chart, display_timeline, show_comparison_table). """ +TOptions = TypeVar("TOptions", bound=TypedDict, default="ChatOptions") # type: ignore[valid-type] + -def ui_generator_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: - """Create a UI generator agent with frontend rendering tools. +def ui_generator_agent(chat_client: ChatClientProtocol[TOptions]) -> AgentFrameworkAgent: + """Create a UI generator agent with custom React component rendering. 
Args: chat_client: The chat client to use for the agent Returns: - A configured AgentFrameworkAgent instance with UI generation tools + A configured AgentFrameworkAgent instance with UI generation capabilities """ agent = ChatAgent( name="ui_generator", @@ -166,7 +174,7 @@ def ui_generator_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: chat_client=chat_client, tools=[generate_haiku, create_chart, display_timeline, show_comparison_table], # Force tool usage - the LLM MUST call a tool, cannot respond with plain text - chat_options={"tool_choice": "required"}, + default_options={"tool_choice": "required"}, # type: ignore ) return AgentFrameworkAgent( diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py index 6edaa02616..32324d72eb 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py @@ -57,7 +57,7 @@ def get_forecast(location: str, days: int = 3) -> str: return f"{days}-day forecast for {location}:\n" + "\n".join(forecast) -def weather_agent(chat_client: ChatClientProtocol) -> ChatAgent: +def weather_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: """Create a weather agent with get_weather and get_forecast tools. Args: @@ -66,7 +66,7 @@ def weather_agent(chat_client: ChatClientProtocol) -> ChatAgent: Returns: A configured ChatAgent instance with weather tools """ - return ChatAgent( + return ChatAgent[Any]( name="weather_agent", instructions=( "You are a helpful weather assistant. " diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py index ebfc42ea19..e71abe7507 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py @@ -4,6 +4,7 @@ import logging import os +from typing import TYPE_CHECKING import uvicorn from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint @@ -19,6 +20,10 @@ from ..agents.ui_generator_agent import ui_generator_agent from ..agents.weather_agent import weather_agent +if TYPE_CHECKING: + from agent_framework import ChatOptions + from agent_framework._clients import BaseChatClient + # Configure logging to file and console (disabled by default - set ENABLE_DEBUG_LOGGING=1 to enable) if os.getenv("ENABLE_DEBUG_LOGGING"): log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "ag_ui_server.log") @@ -60,7 +65,7 @@ # Create a shared chat client for all agents # You can use different chat clients for different agents if needed -chat_client = AzureOpenAIChatClient() +chat_client: BaseChatClient[ChatOptions] = AzureOpenAIChatClient() # Agentic Chat - basic chat agent add_agent_framework_fastapi_endpoint( diff --git a/python/packages/ag-ui/tests/test_ag_ui_client.py b/python/packages/ag-ui/tests/test_ag_ui_client.py index d46561c2c1..bc1cc6d711 100644 --- a/python/packages/ag-ui/tests/test_ag_ui_client.py +++ b/python/packages/ag-ui/tests/test_ag_ui_client.py @@ -40,22 +40,22 @@ def convert_messages_to_agui_format(self, messages: list[ChatMessage]) -> list[d """Expose message conversion helper.""" return self._convert_messages_to_agui_format(messages) - def get_thread_id(self, chat_options: ChatOptions) -> str: + def get_thread_id(self, options: dict[str, Any]) -> str: """Expose thread id 
helper.""" - return self._get_thread_id(chat_options) + return self._get_thread_id(options) async def inner_get_streaming_response( - self, *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions + self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any] ) -> AsyncIterable[ChatResponseUpdate]: """Proxy to protected streaming call.""" - async for update in self._inner_get_streaming_response(messages=messages, chat_options=chat_options): + async for update in self._inner_get_streaming_response(messages=messages, options=options): yield update async def inner_get_response( - self, *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions + self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any] ) -> ChatResponse: """Proxy to protected response call.""" - return await self._inner_get_response(messages=messages, chat_options=chat_options) + return await self._inner_get_response(messages=messages, options=options) class TestAGUIChatClient: @@ -191,7 +191,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str chat_options = ChatOptions() updates: list[ChatResponseUpdate] = [] - async for update in client.inner_get_streaming_response(messages=messages, chat_options=chat_options): + async for update in client.inner_get_streaming_response(messages=messages, options=chat_options): updates.append(update) assert len(updates) == 4 @@ -221,9 +221,9 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str monkeypatch.setattr(client.http_service, "post_run", mock_post_run) messages = [ChatMessage(role="user", text="Test message")] - chat_options = ChatOptions() + chat_options = {} - response = await client.inner_get_response(messages=messages, chat_options=chat_options) + response = await client.inner_get_response(messages=messages, options=chat_options) assert response is not None assert len(response.messages) > 0 @@ -266,7 +266,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str messages = [ChatMessage(role="user", text="Test with tools")] chat_options = ChatOptions(tools=[test_tool]) - response = await client.inner_get_response(messages=messages, chat_options=chat_options) + response = await client.inner_get_response(messages=messages, options=chat_options) assert response is not None @@ -288,10 +288,9 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str monkeypatch.setattr(client.http_service, "post_run", mock_post_run) messages = [ChatMessage(role="user", text="Test server tool execution")] - chat_options = ChatOptions() updates: list[ChatResponseUpdate] = [] - async for update in client.get_streaming_response(messages, chat_options=chat_options): + async for update in client.get_streaming_response(messages): updates.append(update) function_calls = [ @@ -332,9 +331,8 @@ async def fake_auto_invoke(*args: object, **kwargs: Any) -> None: monkeypatch.setattr(client.http_service, "post_run", mock_post_run) messages = [ChatMessage(role="user", text="Test server tool execution")] - chat_options = ChatOptions(tool_choice="auto", tools=[client_tool]) - async for _ in client.get_streaming_response(messages, chat_options=chat_options): + async for _ in client.get_streaming_response(messages, options={"tool_choice": "auto", "tools": [client_tool]}): pass async def test_state_transmission(self, monkeypatch: MonkeyPatch) -> None: @@ -370,6 +368,6 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str 
chat_options = ChatOptions() - response = await client.inner_get_response(messages=messages, chat_options=chat_options) + response = await client.inner_get_response(messages=messages, options=chat_options) assert response is not None diff --git a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py index a553cc838e..86aee55cac 100644 --- a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py +++ b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py @@ -21,11 +21,15 @@ async def test_agent_initialization_basic(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) + agent = ChatAgent[ChatOptions]( + chat_client=StreamingChatClientStub(stream_fn), + name="test_agent", + instructions="Test", + ) wrapper = AgentFrameworkAgent(agent=agent) assert wrapper.name == "test_agent" @@ -39,7 +43,7 @@ async def test_agent_initialization_with_state_schema(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -55,7 +59,7 @@ async def test_agent_initialization_with_predict_state_config(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -71,7 +75,7 @@ async def test_agent_initialization_with_pydantic_state_schema(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -94,7 +98,7 @@ async def test_run_started_event_emission(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -118,7 +122,7 @@ async def test_predict_state_custom_event_emission(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -150,7 +154,7 @@ async def test_initial_state_snapshot_with_schema(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: 
MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -180,7 +184,7 @@ async def test_state_initialization_object_type(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -207,7 +211,7 @@ async def test_state_initialization_array_type(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -234,7 +238,7 @@ async def test_run_finished_event_emission(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -256,7 +260,7 @@ async def test_tool_result_confirm_changes_accepted(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Document updated")]) @@ -303,7 +307,7 @@ async def test_tool_result_confirm_changes_rejected(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="OK")]) @@ -337,7 +341,7 @@ async def test_tool_result_function_approval_accepted(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="OK")]) @@ -383,7 +387,7 @@ async def test_tool_result_function_approval_rejected(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="OK")]) @@ -422,10 +426,11 @@ async def test_thread_metadata_tracking(): thread_metadata: dict[str, Any] = {} async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - if chat_options.metadata: - 
thread_metadata.update(chat_options.metadata) + metadata = options.get("metadata") + if metadata: + thread_metadata.update(metadata) yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) @@ -447,15 +452,16 @@ async def stream_fn( async def test_state_context_injection(): """Test that current state is injected into thread metadata.""" - from agent_framework.ag_ui import AgentFrameworkAgent + from agent_framework_ag_ui import AgentFrameworkAgent thread_metadata: dict[str, Any] = {} async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - if chat_options.metadata: - thread_metadata.update(chat_options.metadata) + metadata = options.get("metadata") + if metadata: + thread_metadata.update(metadata) yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) @@ -484,7 +490,7 @@ async def test_no_messages_provided(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) @@ -508,7 +514,7 @@ async def test_message_end_event_emission(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Hello world")]) @@ -536,7 +542,7 @@ async def test_error_handling_with_exception(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: if False: yield ChatResponseUpdate(contents=[]) @@ -557,7 +563,7 @@ async def test_json_decode_error_in_tool_result(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: if False: yield ChatResponseUpdate(contents=[]) @@ -594,7 +600,7 @@ async def test_suppressed_summary_with_document_state(): from agent_framework.ag_ui import AgentFrameworkAgent, DocumentWriterConfirmationStrategy async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="Response")]) @@ -647,7 +653,7 @@ def get_datetime() -> str: return "2025/12/01 12:00:00" async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: # Capture the messages received by 
the chat client messages_received.clear() @@ -655,9 +661,9 @@ async def stream_fn( yield ChatResponseUpdate(contents=[TextContent(text="Processing completed")]) agent = ChatAgent( + chat_client=StreamingChatClientStub(stream_fn), name="test_agent", instructions="Test", - chat_client=StreamingChatClientStub(stream_fn), tools=[get_datetime], ) wrapper = AgentFrameworkAgent(agent=agent) @@ -738,7 +744,7 @@ def delete_all_data() -> str: return "All data deleted" async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: # Capture the messages received by the chat client messages_received.clear() diff --git a/python/packages/ag-ui/tests/test_orchestrators.py b/python/packages/ag-ui/tests/test_orchestrators.py index a3803f04d0..b1d1a21315 100644 --- a/python/packages/ag-ui/tests/test_orchestrators.py +++ b/python/packages/ag-ui/tests/test_orchestrators.py @@ -22,7 +22,7 @@ class DummyAgent: """Minimal agent stub to capture run_stream parameters.""" def __init__(self) -> None: - self.chat_options = SimpleNamespace(tools=[server_tool], response_format=None) + self.default_options: dict[str, Any] = {"tools": [server_tool], "response_format": None} self.tools = [server_tool] self.chat_client = SimpleNamespace( function_invocation_configuration=FunctionInvocationConfiguration(), diff --git a/python/packages/ag-ui/tests/test_orchestrators_coverage.py b/python/packages/ag-ui/tests/test_orchestrators_coverage.py index e10e84a754..1b63d35bfa 100644 --- a/python/packages/ag-ui/tests/test_orchestrators_coverage.py +++ b/python/packages/ag-ui/tests/test_orchestrators_coverage.py @@ -29,7 +29,7 @@ def approval_tool(param: str) -> str: return f"executed: {param}" -DEFAULT_CHAT_OPTIONS = SimpleNamespace(tools=[approval_tool], response_format=None) +DEFAULT_OPTIONS: dict[str, Any] = {"tools": [approval_tool], "response_format": None} async def test_human_in_the_loop_json_decode_error() -> None: @@ -54,7 +54,7 @@ async def test_human_in_the_loop_json_decode_error() -> None: ] agent = StubAgent( - chat_options=SimpleNamespace(tools=[approval_tool], response_format=None), + default_options={"tools": [approval_tool], "response_format": None}, updates=[AgentRunResponseUpdate(contents=[TextContent(text="response")], role="assistant")], ) context = TestExecutionContext( @@ -106,7 +106,7 @@ async def test_sanitize_tool_history_confirm_changes() -> None: input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -151,7 +151,7 @@ async def test_sanitize_tool_history_orphaned_tool_result() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -191,7 +191,7 @@ async def test_orphaned_tool_result_sanitization() -> None: } agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -234,7 +234,7 @@ async def test_deduplicate_messages_empty_tool_results() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = 
TestExecutionContext( input_data=input_data, @@ -279,7 +279,7 @@ async def test_deduplicate_messages_duplicate_assistant_tool_calls() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -323,7 +323,7 @@ async def test_deduplicate_messages_duplicate_system_messages() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -362,7 +362,7 @@ async def test_state_context_injection() -> None: } agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -407,7 +407,7 @@ async def test_state_context_injection_with_tool_calls_and_input_state() -> None orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": [], "state": {"weather": "sunny"}} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -449,7 +449,7 @@ class RecipeState(BaseModel): # Agent with structured output agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, updates=[ AgentRunResponseUpdate( contents=[TextContent(text='{"ingredients": ["tomato"], "message": "Added tomato"}')], @@ -457,7 +457,7 @@ class RecipeState(BaseModel): ) ], ) - agent.chat_options.response_format = RecipeState + agent.default_options["response_format"] = RecipeState context = TestExecutionContext( input_data=input_data, @@ -510,9 +510,9 @@ def get_weather(location: str) -> str: } agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) - agent.chat_options.tools = [get_weather] + agent.default_options["tools"] = [get_weather] context = TestExecutionContext( input_data=input_data, @@ -559,9 +559,9 @@ def server_tool() -> str: } agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) - agent.chat_options.tools = [server_tool] + agent.default_options["tools"] = [server_tool] context = TestExecutionContext( input_data=input_data, @@ -587,7 +587,7 @@ async def test_empty_messages_handling() -> None: input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -621,7 +621,7 @@ async def test_all_messages_filtered_handling() -> None: } agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -663,7 +663,7 @@ async def test_confirm_changes_with_invalid_json_fallback() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -706,7 +706,7 @@ async def test_confirm_changes_closes_active_message_before_finish() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Start"}]} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, updates=updates, ) context = TestExecutionContext( 
@@ -751,7 +751,7 @@ async def test_tool_result_kept_when_call_id_matches() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -781,7 +781,7 @@ class CustomAgent: """Custom agent without ChatAgent type.""" def __init__(self) -> None: - self.chat_options = SimpleNamespace(tools=[], response_format=None) + self.default_options: dict[str, Any] = {"tools": [], "response_format": None} self.chat_client = SimpleNamespace(function_invocation_configuration=SimpleNamespace()) self.messages_received: list[Any] = [] @@ -827,7 +827,7 @@ async def test_initial_state_snapshot_with_array_schema() -> None: orchestrator = DefaultOrchestrator() input_data: dict[str, Any] = {"messages": [], "state": {}} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) context = TestExecutionContext( input_data=input_data, @@ -859,9 +859,9 @@ class OutputModel(BaseModel): input_data: dict[str, Any] = {"messages": []} agent = StubAgent( - chat_options=DEFAULT_CHAT_OPTIONS, + default_options=DEFAULT_OPTIONS, ) - agent.chat_options.response_format = OutputModel + agent.default_options["response_format"] = OutputModel context = TestExecutionContext( input_data=input_data, diff --git a/python/packages/ag-ui/tests/test_structured_output.py b/python/packages/ag-ui/tests/test_structured_output.py index aee8fe6dc8..b9a04353be 100644 --- a/python/packages/ag-ui/tests/test_structured_output.py +++ b/python/packages/ag-ui/tests/test_structured_output.py @@ -40,14 +40,14 @@ async def test_structured_output_with_recipe(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate( contents=[TextContent(text='{"recipe": {"name": "Pasta"}, "message": "Here is your recipe"}')] ) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) - agent.chat_options = ChatOptions(response_format=RecipeOutput) + agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent( agent=agent, @@ -78,7 +78,7 @@ async def test_structured_output_with_steps(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: steps_data = { "steps": [ @@ -89,7 +89,7 @@ async def stream_fn( yield ChatResponseUpdate(contents=[TextContent(text=json.dumps(steps_data))]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) - agent.chat_options = ChatOptions(response_format=StepsOutput) + agent.default_options = ChatOptions(response_format=StepsOutput) wrapper = AgentFrameworkAgent( agent=agent, @@ -124,7 +124,7 @@ async def test_structured_output_with_no_schema_match(): agent = ChatAgent( name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_from_updates(updates)) ) - agent.chat_options = ChatOptions(response_format=GenericOutput) + agent.default_options = ChatOptions(response_format=GenericOutput) wrapper = AgentFrameworkAgent( agent=agent, @@ -154,12 
+154,12 @@ class DataOutput(BaseModel): info: str async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text='{"data": {"key": "value"}, "info": "processed"}')]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) - agent.chat_options = ChatOptions(response_format=DataOutput) + agent.default_options = ChatOptions(response_format=DataOutput) wrapper = AgentFrameworkAgent( agent=agent, @@ -213,13 +213,13 @@ async def test_structured_output_with_message_field(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: output_data = {"recipe": {"name": "Salad"}, "message": "Fresh salad recipe ready"} yield ChatResponseUpdate(contents=[TextContent(text=json.dumps(output_data))]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) - agent.chat_options = ChatOptions(response_format=RecipeOutput) + agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent( agent=agent, @@ -248,13 +248,13 @@ async def test_empty_updates_no_structured_processing(): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: if False: yield ChatResponseUpdate(contents=[]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) - agent.chat_options = ChatOptions(response_format=RecipeOutput) + agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent(agent=agent) diff --git a/python/packages/ag-ui/tests/utils_test_ag_ui.py b/python/packages/ag-ui/tests/utils_test_ag_ui.py index 3c86c284da..b0f70f3dc8 100644 --- a/python/packages/ag-ui/tests/utils_test_ag_ui.py +++ b/python/packages/ag-ui/tests/utils_test_ag_ui.py @@ -2,9 +2,10 @@ """Shared test stubs for AG-UI tests.""" +import sys from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, MutableSequence from types import SimpleNamespace -from typing import Any +from typing import Any, Generic from agent_framework import ( AgentProtocol, @@ -13,20 +14,25 @@ AgentThread, BaseChatClient, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, TextContent, ) +from agent_framework._clients import TOptions_co from agent_framework_ag_ui._message_adapters import _deduplicate_messages, _sanitize_tool_history from agent_framework_ag_ui._orchestrators import ExecutionContext +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + StreamFn = Callable[..., AsyncIterator[ChatResponseUpdate]] ResponseFn = Callable[..., Awaitable[ChatResponse]] -class StreamingChatClientStub(BaseChatClient): +class StreamingChatClientStub(BaseChatClient[TOptions_co], Generic[TOptions_co]): """Typed streaming stub that satisfies ChatClientProtocol.""" def __init__(self, stream_fn: StreamFn, 
response_fn: ResponseFn | None = None) -> None: @@ -34,20 +40,22 @@ def __init__(self, stream_fn: StreamFn, response_fn: ResponseFn | None = None) - self._stream_fn = stream_fn self._response_fn = response_fn + @override async def _inner_get_streaming_response( - self, *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - async for update in self._stream_fn(messages, chat_options, **kwargs): + async for update in self._stream_fn(messages, options, **kwargs): yield update + @override async def _inner_get_response( - self, *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: if self._response_fn is not None: - return await self._response_fn(messages, chat_options, **kwargs) + return await self._response_fn(messages, options, **kwargs) contents: list[Any] = [] - async for update in self._stream_fn(messages, chat_options, **kwargs): + async for update in self._stream_fn(messages, options, **kwargs): contents.extend(update.contents) return ChatResponse( @@ -60,7 +68,7 @@ def stream_from_updates(updates: list[ChatResponseUpdate]) -> StreamFn: """Create a stream function that yields from a static list of updates.""" async def _stream( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: for update in updates: yield update @@ -77,14 +85,16 @@ def __init__( *, agent_id: str = "stub-agent", agent_name: str | None = "stub-agent", - chat_options: Any | None = None, + default_options: Any | None = None, chat_client: Any | None = None, ) -> None: self.id = agent_id self.name = agent_name self.description = "stub agent" self.updates = updates or [AgentRunResponseUpdate(contents=[TextContent(text="response")], role="assistant")] - self.chat_options = chat_options or SimpleNamespace(tools=None, response_format=None) + self.default_options: dict[str, Any] = ( + default_options if isinstance(default_options, dict) else {"tools": None, "response_format": None} + ) self.chat_client = chat_client or SimpleNamespace(function_invocation_configuration=None) self.messages_received: list[Any] = [] self.tools_received: list[Any] | None = None diff --git a/python/packages/anthropic/agent_framework_anthropic/__init__.py b/python/packages/anthropic/agent_framework_anthropic/__init__.py index e81064b213..706740a127 100644 --- a/python/packages/anthropic/agent_framework_anthropic/__init__.py +++ b/python/packages/anthropic/agent_framework_anthropic/__init__.py @@ -2,7 +2,7 @@ import importlib.metadata -from ._chat_client import AnthropicClient +from ._chat_client import AnthropicChatOptions, AnthropicClient try: __version__ = importlib.metadata.version(__name__) @@ -10,6 +10,7 @@ __version__ = "0.0.0" # Fallback for development mode __all__ = [ + "AnthropicChatOptions", "AnthropicClient", "__version__", ] diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index 81655c16a4..c9223e614b 100644 --- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -1,6 +1,8 @@ # Copyright (c) Microsoft. 
All rights reserved. + +import sys from collections.abc import AsyncIterable, MutableMapping, MutableSequence, Sequence -from typing import Any, ClassVar, Final, TypeVar +from typing import Any, ClassVar, Final, Generic, Literal, TypedDict from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, @@ -49,15 +51,132 @@ BetaTextBlock, BetaUsage, ) -from anthropic.types.beta.beta_bash_code_execution_tool_result_error import BetaBashCodeExecutionToolResultError -from anthropic.types.beta.beta_code_execution_tool_result_error import BetaCodeExecutionToolResultError +from anthropic.types.beta.beta_bash_code_execution_tool_result_error import ( + BetaBashCodeExecutionToolResultError, +) +from anthropic.types.beta.beta_code_execution_tool_result_error import ( + BetaCodeExecutionToolResultError, +) from pydantic import SecretStr, ValidationError +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + +__all__ = [ + "AnthropicChatOptions", + "AnthropicClient", + "ThinkingConfig", +] + logger = get_logger("agent_framework.anthropic") ANTHROPIC_DEFAULT_MAX_TOKENS: Final[int] = 1024 BETA_FLAGS: Final[list[str]] = ["mcp-client-2025-04-04", "code-execution-2025-08-25"] + +# region Anthropic Chat Options TypedDict + + +class ThinkingConfig(TypedDict, total=False): + """Configuration for enabling Claude's extended thinking. + + When enabled, responses include ``thinking`` content blocks showing Claude's + thinking process before the final answer. Requires a minimum budget of 1,024 + tokens and counts towards your ``max_tokens`` limit. + + See https://docs.claude.com/en/docs/build-with-claude/extended-thinking for details. + + Keys: + type: "enabled" to enable extended thinking, "disabled" to disable. + budget_tokens: The token budget for thinking (minimum 1024, required when type="enabled"). + """ + + type: Literal["enabled", "disabled"] + budget_tokens: int + + +class AnthropicChatOptions(ChatOptions, total=False): + """Anthropic-specific chat options. + + Extends ChatOptions with options specific to Anthropic's Messages API. + Options that Anthropic doesn't support are typed as None to indicate they're unavailable. + + Note: + Anthropic REQUIRES max_tokens to be specified. If not provided, + a default of 1024 will be used. + + Keys: + model_id: The model to use for the request, + translates to ``model`` in Anthropic API. + temperature: Sampling temperature between 0 and 1. + top_p: Nucleus sampling parameter. + max_tokens: Maximum number of tokens to generate (REQUIRED). + stop: Stop sequences, + translates to ``stop_sequences`` in Anthropic API. + tools: List of tools (functions) available to the model. + tool_choice: How the model should use tools. + response_format: Structured output schema. + metadata: Request metadata with user_id for tracking. + user: User identifier, translates to ``metadata.user_id`` in Anthropic API. + instructions: System instructions for the model, + translates to ``system`` in Anthropic API. + top_k: Number of top tokens to consider for sampling. + service_tier: Service tier ("auto" or "standard_only"). + thinking: Extended thinking configuration for Claude models. + When enabled, responses include ``thinking`` content blocks showing Claude's + thinking process before the final answer. 
Requires a minimum budget of 1,024 + tokens and counts towards your ``max_tokens`` limit. + See https://docs.claude.com/en/docs/build-with-claude/extended-thinking for details. + container: Container configuration for skills. + additional_beta_flags: Additional beta flags to enable on the request. + """ + + # Anthropic-specific generation parameters (supported by all models) + top_k: int + service_tier: Literal["auto", "standard_only"] + + # Extended thinking (Claude models) + thinking: ThinkingConfig + + # Skills + container: dict[str, Any] + + # Beta features + additional_beta_flags: list[str] + + # Unsupported base options (override with None to indicate not supported) + logit_bias: None # type: ignore[misc] + seed: None # type: ignore[misc] + frequency_penalty: None # type: ignore[misc] + presence_penalty: None # type: ignore[misc] + store: None # type: ignore[misc] + + +TAnthropicOptions = TypeVar( + "TAnthropicOptions", + bound=TypedDict, # type: ignore[valid-type] + default="AnthropicChatOptions", + covariant=True, +) + +# Translation between framework options keys and Anthropic Messages API +OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "model", + "stop": "stop_sequences", + "instructions": "system", +} + + +# region Role and Finish Reason Maps + + ROLE_MAP: dict[Role, str] = { Role.USER: "user", Role.ASSISTANT: "assistant", @@ -111,13 +230,10 @@ class AnthropicSettings(AFBaseSettings): chat_model_id: str | None = None -TAnthropicClient = TypeVar("TAnthropicClient", bound="AnthropicClient") - - @use_function_invocation @use_instrumentation @use_chat_middleware -class AnthropicClient(BaseChatClient): +class AnthropicClient(BaseChatClient[TAnthropicOptions], Generic[TAnthropicOptions]): """Anthropic Chat client.""" OTEL_PROVIDER_NAME: ClassVar[str] = "anthropic" # type: ignore[reportIncompatibleVariableOverride, misc] @@ -177,6 +293,18 @@ def __init__( anthropic_client=anthropic_client, ) + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.anthropic import AnthropicChatOptions + + + class MyOptions(AnthropicChatOptions, total=False): + my_custom_option: str + + + client: AnthropicClient[MyOptions] = AnthropicClient(model_id="claude-sonnet-4-5-20250929") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) + """ try: anthropic_settings = AnthropicSettings( @@ -212,29 +340,31 @@ def __init__( # region Get response methods + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: # prepare - run_options = self._prepare_options(messages, chat_options, **kwargs) + run_options = self._prepare_options(messages, options, **kwargs) # execute message = await self.anthropic_client.beta.messages.create(**run_options, stream=False) # process return self._process_message(message) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: # prepare - run_options = self._prepare_options(messages, chat_options, **kwargs) + run_options = self._prepare_options(messages, options, **kwargs) # execute and process async for chunk in await self.anthropic_client.beta.messages.create(**run_options, stream=True): parsed_chunk = self._process_stream_event(chunk) @@ -246,35 +376,31 @@ async def _inner_get_streaming_response( def _prepare_options( 
self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> dict[str, Any]: - """Create run options for the Anthropic client based on messages and chat options. + """Create run options for the Anthropic client based on messages and options. Args: messages: The list of chat messages. - chat_options: The chat options. + options: The options dict. kwargs: Additional keyword arguments. Returns: A dictionary of run options for the Anthropic client. """ - run_options: dict[str, Any] = chat_options.to_dict( - exclude={ - "type", - "instructions", # handled via system message - "tool_choice", # handled separately - "allow_multiple_tool_calls", # handled via tool_choice - "additional_properties", # handled separately - } - ) + # Prepend instructions from options if they exist + instructions = options.get("instructions") + if instructions: + from agent_framework._types import prepend_instructions_to_messages - # translations between ChatOptions and Anthropic API - translations = { - "model_id": "model", - "stop": "stop_sequences", - } - for old_key, new_key in translations.items(): + messages = prepend_instructions_to_messages(list(messages), instructions, role="system") + + # Start with a copy of options + run_options: dict[str, Any] = {k: v for k, v in options.items() if v is not None and k not in {"instructions"}} + + # Translation between options keys and Anthropic Messages API + for old_key, new_key in OPTION_TRANSLATIONS.items(): if old_key in run_options and old_key != new_key: run_options[new_key] = run_options.pop(old_key) @@ -296,31 +422,30 @@ def _prepare_options( run_options["system"] = messages[0].text # betas - run_options["betas"] = self._prepare_betas(chat_options) + run_options["betas"] = self._prepare_betas(options) # extra headers run_options["extra_headers"] = {"User-Agent": AGENT_FRAMEWORK_USER_AGENT} + # Handle user option -> metadata.user_id (Anthropic uses metadata.user_id instead of user) + if user := run_options.pop("user", None): + metadata = run_options.get("metadata", {}) + if "user_id" not in metadata: + metadata["user_id"] = user + run_options["metadata"] = metadata + # tools, mcp servers and tool choice - if tools_config := self._prepare_tools_for_anthropic(chat_options): + if tools_config := self._prepare_tools_for_anthropic(options): run_options.update(tools_config) - # additional properties - additional_options = { - key: value - for key, value in chat_options.additional_properties.items() - if value is not None and key != "additional_beta_flags" - } - if additional_options: - run_options.update(additional_options) run_options.update(kwargs) return run_options - def _prepare_betas(self, chat_options: ChatOptions) -> set[str]: + def _prepare_betas(self, options: dict[str, Any]) -> set[str]: """Prepare the beta flags for the Anthropic API request. Args: - chat_options: The chat options that may contain additional beta flags. + options: The options dict that may contain additional beta flags. Returns: A set of beta flag strings to include in the request. 
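For readers skimming the new options surface above, here is a minimal usage sketch — illustrative only and not part of this patch; the model name, prompt, and token budgets are assumptions — showing how the Anthropic-specific `top_k` and `thinking` keys defined in `AnthropicChatOptions` and `ThinkingConfig` could be passed through the typed options dict:

```python
# Illustrative sketch, not part of this diff. Assumes Anthropic credentials are
# configured in the environment (e.g. an Anthropic API key) and that the model
# name below is available; both are assumptions, not values taken from the patch.
import asyncio

from agent_framework_anthropic import AnthropicClient


async def main() -> None:
    client = AnthropicClient(model_id="claude-sonnet-4-5-20250929")
    response = await client.get_response(
        "Explain your plan step by step.",
        options={
            # Anthropic requires max_tokens; the client falls back to 1024 if omitted.
            "max_tokens": 2048,
            # Anthropic-specific sampling option from AnthropicChatOptions.
            "top_k": 40,
            # Extended thinking: budget_tokens has a 1024 minimum and counts
            # towards the max_tokens limit (see ThinkingConfig above).
            "thinking": {"type": "enabled", "budget_tokens": 1024},
        },
    )
    print(response)


if __name__ == "__main__":
    asyncio.run(main())
```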
@@ -328,7 +453,7 @@ def _prepare_betas(self, chat_options: ChatOptions) -> set[str]: return { *BETA_FLAGS, *self.additional_beta_flags, - *chat_options.additional_properties.get("additional_beta_flags", []), + *options.get("additional_beta_flags", []), } def _prepare_messages_for_anthropic(self, messages: MutableSequence[ChatMessage]) -> list[dict[str, Any]]: @@ -370,7 +495,10 @@ def _prepare_message_for_anthropic(self, message: ChatMessage) -> dict[str, Any] logger.debug(f"Ignoring unsupported data content media type: {content.media_type} for now") case "uri": if content.has_top_level_media_type("image"): - a_content.append({"type": "image", "source": {"type": "url", "url": content.uri}}) + a_content.append({ + "type": "image", + "source": {"type": "url", "url": content.uri}, + }) else: logger.debug(f"Ignoring unsupported data content media type: {content.media_type} for now") case "function_call": @@ -397,22 +525,25 @@ def _prepare_message_for_anthropic(self, message: ChatMessage) -> dict[str, Any] "content": a_content, } - def _prepare_tools_for_anthropic(self, chat_options: ChatOptions) -> dict[str, Any] | None: + def _prepare_tools_for_anthropic(self, options: dict[str, Any]) -> dict[str, Any] | None: """Prepare tools and tool choice configuration for the Anthropic API request. Args: - chat_options: The chat options containing tools and tool choice settings. + options: The options dict containing tools and tool choice settings. Returns: A dictionary with tools, mcp_servers, and tool_choice configuration, or None if empty. """ + from agent_framework._types import validate_tool_mode + result: dict[str, Any] = {} + tools = options.get("tools") # Process tools - if chat_options.tools: + if tools: tool_list: list[MutableMapping[str, Any]] = [] mcp_server_list: list[MutableMapping[str, Any]] = [] - for tool in chat_options.tools: + for tool in tools: match tool: case MutableMapping(): tool_list.append(tool) @@ -457,34 +588,31 @@ def _prepare_tools_for_anthropic(self, chat_options: ChatOptions) -> dict[str, A result["mcp_servers"] = mcp_server_list # Process tool choice - if chat_options.tool_choice is not None: - tool_choice_mode = ( - chat_options.tool_choice if isinstance(chat_options.tool_choice, str) else chat_options.tool_choice.mode - ) - match tool_choice_mode: - case "auto": - tool_choice: dict[str, Any] = {"type": "auto"} - if chat_options.allow_multiple_tool_calls is not None: - tool_choice["disable_parallel_tool_use"] = not chat_options.allow_multiple_tool_calls - result["tool_choice"] = tool_choice - case "required": - if ( - not isinstance(chat_options.tool_choice, str) - and chat_options.tool_choice.required_function_name - ): - tool_choice = { - "type": "tool", - "name": chat_options.tool_choice.required_function_name, - } - else: - tool_choice = {"type": "any"} - if chat_options.allow_multiple_tool_calls is not None: - tool_choice["disable_parallel_tool_use"] = not chat_options.allow_multiple_tool_calls - result["tool_choice"] = tool_choice - case "none": - result["tool_choice"] = {"type": "none"} - case _: - logger.debug(f"Ignoring unsupported tool choice mode: {tool_choice_mode} for now") + if options.get("tool_choice") is None: + return result or None + tool_mode = validate_tool_mode(options.get("tool_choice")) + allow_multiple = options.get("allow_multiple_tool_calls") + match tool_mode.get("mode"): + case "auto": + tool_choice: dict[str, Any] = {"type": "auto"} + if allow_multiple is not None: + tool_choice["disable_parallel_tool_use"] = not allow_multiple + 
result["tool_choice"] = tool_choice + case "required": + if "required_function_name" in tool_mode: + tool_choice = { + "type": "tool", + "name": tool_mode["required_function_name"], + } + else: + tool_choice = {"type": "any"} + if allow_multiple is not None: + tool_choice["disable_parallel_tool_use"] = not allow_multiple + result["tool_choice"] = tool_choice + case "none": + result["tool_choice"] = {"type": "none"} + case _: + logger.debug(f"Ignoring unsupported tool choice mode: {tool_mode} for now") return result or None @@ -531,7 +659,10 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons return ChatResponseUpdate( response_id=event.message.id, - contents=[*self._parse_contents_from_anthropic(event.message.content), *usage_details], + contents=[ + *self._parse_contents_from_anthropic(event.message.content), + *usage_details, + ], model_id=event.message.model, finish_reason=FINISH_REASON_MAP.get(event.message.stop_reason) if event.message.stop_reason @@ -579,7 +710,8 @@ def _parse_usage_from_anthropic(self, usage: BetaUsage | BetaMessageDeltaUsage | return usage_details def _parse_contents_from_anthropic( - self, content: Sequence[BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock] + self, + content: Sequence[BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock], ) -> list[Contents]: """Parse contents from the Anthropic message.""" contents: list[Contents] = [] @@ -609,7 +741,12 @@ def _parse_contents_from_anthropic( contents.append( CodeInterpreterToolCallContent( call_id=content_block.id, - inputs=[TextContent(text=str(content_block.input), raw_representation=content_block)], + inputs=[ + TextContent( + text=str(content_block.input), + raw_representation=content_block, + ) + ], raw_representation=content_block, ) ) @@ -630,7 +767,10 @@ def _parse_contents_from_anthropic( parsed_output = self._parse_contents_from_anthropic(content_block.content) elif isinstance(content_block.content, (str, bytes)): parsed_output = [ - TextContent(text=str(content_block.content), raw_representation=content_block) + TextContent( + text=str(content_block.content), + raw_representation=content_block, + ) ] else: parsed_output = self._parse_contents_from_anthropic([content_block.content]) @@ -679,7 +819,8 @@ def _parse_contents_from_anthropic( for code_file_content in content_block.content.content: code_outputs.append( HostedFileContent( - file_id=code_file_content.file_id, raw_representation=code_file_content + file_id=code_file_content.file_id, + raw_representation=code_file_content, ) ) contents.append( @@ -720,7 +861,8 @@ def _parse_contents_from_anthropic( for bash_file_content in content_block.content.content: contents.append( HostedFileContent( - file_id=bash_file_content.file_id, raw_representation=bash_file_content + file_id=bash_file_content.file_id, + raw_representation=bash_file_content, ) ) contents.append( @@ -847,7 +989,12 @@ def _parse_contents_from_anthropic( ) ) case "thinking" | "thinking_delta": - contents.append(TextReasoningContent(text=content_block.thinking, raw_representation=content_block)) + contents.append( + TextReasoningContent( + text=content_block.thinking, + raw_representation=content_block, + ) + ) case _: logger.debug(f"Ignoring unsupported content type: {content_block.type} for now") return contents @@ -870,7 +1017,10 @@ def _parse_citations_from_anthropic( if not cit.annotated_regions: cit.annotated_regions = [] cit.annotated_regions.append( - TextSpanRegion(start_index=citation.start_char_index, 
end_index=citation.end_char_index) + TextSpanRegion( + start_index=citation.start_char_index, + end_index=citation.end_char_index, + ) ) case "page_location": cit.title = citation.document_title @@ -893,7 +1043,10 @@ def _parse_citations_from_anthropic( if not cit.annotated_regions: cit.annotated_regions = [] cit.annotated_regions.append( - TextSpanRegion(start_index=citation.start_block_index, end_index=citation.end_block_index) + TextSpanRegion( + start_index=citation.start_block_index, + end_index=citation.end_block_index, + ) ) case "web_search_result_location": cit.title = citation.title @@ -906,7 +1059,10 @@ def _parse_citations_from_anthropic( if not cit.annotated_regions: cit.annotated_regions = [] cit.annotated_regions.append( - TextSpanRegion(start_index=citation.start_block_index, end_index=citation.end_block_index) + TextSpanRegion( + start_index=citation.start_block_index, + end_index=citation.end_block_index, + ) ) case _: logger.debug(f"Unknown citation type encountered: {citation.type}") diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index 8d5a42d3e1..828d9916c2 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ b/python/packages/anthropic/tests/test_anthropic_client.py @@ -677,7 +677,7 @@ async def test_inner_get_response(mock_anthropic_client: MagicMock) -> None: chat_options = ChatOptions(max_tokens=10) response = await chat_client._inner_get_response( # type: ignore[attr-defined] - messages=messages, chat_options=chat_options + messages=messages, options=chat_options ) assert response is not None @@ -702,7 +702,7 @@ async def mock_stream(): chunks: list[ChatResponseUpdate] = [] async for chunk in chat_client._inner_get_streaming_response( # type: ignore[attr-defined] - messages=messages, chat_options=chat_options + messages=messages, options=chat_options ): if chunk: chunks.append(chunk) @@ -730,7 +730,7 @@ async def test_anthropic_client_integration_basic_chat() -> None: messages = [ChatMessage(role=Role.USER, text="Say 'Hello, World!' 
and nothing else.")] - response = await client.get_response(messages=messages, chat_options=ChatOptions(max_tokens=50)) + response = await client.get_response(messages=messages, options={"max_tokens": 50}) assert response is not None assert len(response.messages) > 0 @@ -748,7 +748,7 @@ async def test_anthropic_client_integration_streaming_chat() -> None: messages = [ChatMessage(role=Role.USER, text="Count from 1 to 5.")] chunks = [] - async for chunk in client.get_streaming_response(messages=messages, chat_options=ChatOptions(max_tokens=50)): + async for chunk in client.get_streaming_response(messages=messages, options={"max_tokens": 50}): chunks.append(chunk) assert len(chunks) > 0 @@ -766,7 +766,7 @@ async def test_anthropic_client_integration_function_calling() -> None: response = await client.get_response( messages=messages, - chat_options=ChatOptions(tools=tools, max_tokens=100), + options={"tools": tools, "max_tokens": 100}, ) assert response is not None @@ -796,7 +796,7 @@ async def test_anthropic_client_integration_hosted_tools() -> None: response = await client.get_response( messages=messages, - chat_options=ChatOptions(tools=tools, max_tokens=100), + options={"tools": tools, "max_tokens": 100}, ) assert response is not None @@ -814,7 +814,7 @@ async def test_anthropic_client_integration_with_system_message() -> None: ChatMessage(role=Role.USER, text="Hello!"), ] - response = await client.get_response(messages=messages, chat_options=ChatOptions(max_tokens=50)) + response = await client.get_response(messages=messages, options={"max_tokens": 50}) assert response is not None assert len(response.messages) > 0 @@ -830,7 +830,7 @@ async def test_anthropic_client_integration_temperature_control() -> None: response = await client.get_response( messages=messages, - chat_options=ChatOptions(max_tokens=20, temperature=0.0), + options={"max_tokens": 20, "temperature": 0.0}, ) assert response is not None diff --git a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py index a63ad1deb2..ac81a3c50b 100644 --- a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py +++ b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py @@ -91,16 +91,16 @@ except ImportError: _agentic_retrieval_available = False -if sys.version_info >= (3, 11): - from typing import Self # pragma: no cover -else: - from typing_extensions import Self # pragma: no cover - if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore[import] # pragma: no cover +if sys.version_info >= (3, 11): + from typing import Self # pragma: no cover +else: + from typing_extensions import Self # pragma: no cover + """Azure AI Search Context Provider for Agent Framework. 
This module provides context providers for Azure AI Search integration with two modes: diff --git a/python/packages/azure-ai/agent_framework_azure_ai/__init__.py b/python/packages/azure-ai/agent_framework_azure_ai/__init__.py index cf2423693d..36ed5d696b 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/__init__.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/__init__.py @@ -2,7 +2,7 @@ import importlib.metadata -from ._chat_client import AzureAIAgentClient +from ._chat_client import AzureAIAgentClient, AzureAIAgentOptions from ._client import AzureAIClient from ._shared import AzureAISettings @@ -13,6 +13,7 @@ __all__ = [ "AzureAIAgentClient", + "AzureAIAgentOptions", "AzureAIClient", "AzureAISettings", "__version__", diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 50d18bbdc1..e563cdc319 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -5,8 +5,8 @@ import os import re import sys -from collections.abc import AsyncIterable, MutableMapping, MutableSequence, Sequence -from typing import Any, ClassVar, TypeVar +from collections.abc import AsyncIterable, Mapping, MutableMapping, MutableSequence, Sequence +from typing import Any, ClassVar, Generic, TypedDict from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, @@ -32,7 +32,6 @@ Role, TextContent, TextSpanRegion, - ToolMode, ToolProtocol, UriContent, UsageContent, @@ -42,7 +41,7 @@ use_chat_middleware, use_function_invocation, ) -from agent_framework.exceptions import ServiceInitializationError, ServiceResponseException +from agent_framework.exceptions import ServiceInitializationError, ServiceInvalidRequestError, ServiceResponseException from agent_framework.observability import use_instrumentation from azure.ai.agents.aio import AgentsClient from azure.ai.agents.models import ( @@ -90,10 +89,18 @@ ToolOutput, ) from azure.core.credentials_async import AsyncTokenCredential -from pydantic import ValidationError +from pydantic import BaseModel, ValidationError from ._shared import AzureAISettings +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover if sys.version_info >= (3, 11): from typing import Self # pragma: no cover else: @@ -102,14 +109,106 @@ logger = get_logger("agent_framework.azure") +__all__ = ["AzureAIAgentClient", "AzureAIAgentOptions"] + + +# region Azure AI Agent Options TypedDict + + +class AzureAIAgentOptions(ChatOptions, total=False): + """Azure AI Foundry Agent Service-specific options dict. + + Extends base ChatOptions with Azure AI Agent Service parameters. + Azure AI Agents provides a managed agent runtime with built-in + tools for code interpreter, file search, and web search. + + See: https://learn.microsoft.com/azure/ai-services/agents/ + + Keys: + # Inherited from ChatOptions: + model_id: The model deployment name, + translates to ``model`` in Azure AI API. + temperature: Sampling temperature between 0 and 2. + top_p: Nucleus sampling parameter. + max_tokens: Maximum number of tokens to generate, + translates to ``max_completion_tokens`` in Azure AI API. 
+ tools: List of tools available to the agent. + tool_choice: How the model should use tools. + allow_multiple_tool_calls: Whether to allow parallel tool calls, + translates to ``parallel_tool_calls`` in Azure AI API. + response_format: Structured output schema. + metadata: Request metadata for tracking. + instructions: System instructions for the agent. + + # Options not supported in Azure AI Agent Service: + stop: Not supported. + seed: Not supported. + frequency_penalty: Not supported. + presence_penalty: Not supported. + user: Not supported. + store: Not supported. + logit_bias: Not supported. + + # Azure AI Agent-specific options: + conversation_id: Thread ID to continue conversation in. + tool_resources: Resources for tools (file IDs, vector stores). + """ + + # Azure AI Agent-specific options + conversation_id: str # type: ignore[misc] + """Thread ID to continue a conversation in an existing thread.""" + + tool_resources: dict[str, Any] + """Tool-specific resources for code_interpreter and file_search. + For code_interpreter: {"file_ids": ["file-abc123"]} + For file_search: {"vector_store_ids": ["vs-abc123"]} + """ + + # ChatOptions fields not supported in Azure AI Agent Service + stop: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + seed: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + frequency_penalty: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + presence_penalty: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + user: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + store: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + logit_bias: None # type: ignore[misc] + """Not supported in Azure AI Agent Service.""" + + +AZURE_AI_AGENT_OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "model", + "max_tokens": "max_completion_tokens", + "allow_multiple_tool_calls": "parallel_tool_calls", +} +"""Maps ChatOptions keys to Azure AI Agents API parameter names.""" + +TAzureAIAgentOptions = TypeVar( + "TAzureAIAgentOptions", + bound=TypedDict, # type: ignore[valid-type] + default="AzureAIAgentOptions", + covariant=True, +) + -TAzureAIAgentClient = TypeVar("TAzureAIAgentClient", bound="AzureAIAgentClient") +# endregion @use_function_invocation @use_instrumentation @use_chat_middleware -class AzureAIAgentClient(BaseChatClient): +class AzureAIAgentClient(BaseChatClient[TAzureAIAgentOptions], Generic[TAzureAIAgentOptions]): """Azure AI Agent Chat client.""" OTEL_PROVIDER_NAME: ClassVar[str] = "azure.ai" # type: ignore[reportIncompatibleVariableOverride, misc] @@ -162,19 +261,31 @@ def __init__( # Using environment variables # Set AZURE_AI_PROJECT_ENDPOINT=https://your-project.cognitiveservices.azure.com - # Set AZURE_AI_MODEL_DEPLOYMENT_NAME=gpt-4 + # Set AZURE_AI_MODEL_DEPLOYMENT_NAME= credential = DefaultAzureCredential() client = AzureAIAgentClient(credential=credential) # Or passing parameters directly client = AzureAIAgentClient( project_endpoint="https://your-project.cognitiveservices.azure.com", - model_deployment_name="gpt-4", + model_deployment_name="", credential=credential, ) # Or loading from a .env file client = AzureAIAgentClient(credential=credential, env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework_azure_ai import AzureAIAgentOptions + + + class MyOptions(AzureAIAgentOptions, total=False): + my_custom_option: str + + 
+ client: AzureAIAgentClient[MyOptions] = AzureAIAgentClient(credential=credential) + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: azure_ai_settings = AzureAISettings( @@ -240,46 +351,29 @@ async def close(self) -> None: await self._cleanup_agent_if_needed() await self._close_client_if_needed() - @classmethod - def from_settings(cls: type[TAzureAIAgentClient], settings: dict[str, Any]) -> TAzureAIAgentClient: - """Initialize a AzureAIAgentClient from a dictionary of settings. - - Args: - settings: A dictionary of settings for the service. - """ - return cls( - agents_client=settings.get("agents_client"), - agent_id=settings.get("agent_id"), - thread_id=settings.get("thread_id"), - project_endpoint=settings.get("project_endpoint"), - model_deployment_name=settings.get("model_deployment_name"), - agent_name=settings.get("agent_name"), - credential=settings.get("credential"), - env_file_path=settings.get("env_file_path"), - should_cleanup_agent=settings.get("should_cleanup_agent", True), - ) - + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: return await ChatResponse.from_chat_response_generator( - updates=self._inner_get_streaming_response(messages=messages, chat_options=chat_options, **kwargs), - output_format_type=chat_options.response_format, + updates=self._inner_get_streaming_response(messages=messages, options=options, **kwargs), + output_format_type=options.get("response_format"), ) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: Mapping[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: # prepare - run_options, required_action_results = await self._prepare_options(messages, chat_options, **kwargs) + run_options, required_action_results = await self._prepare_options(messages, options, **kwargs) agent_id = await self._get_agent_id_or_create(run_options) # execute and process @@ -783,46 +877,31 @@ async def _load_agent_definition_if_needed(self) -> Agent | None: self._agent_definition = await self.agents_client.get_agent(self.agent_id) return self._agent_definition - def _prepare_tool_choice(self, chat_options: ChatOptions) -> None: - """Prepare the tools and tool choice for the chat options. - - Args: - chat_options: The chat options to prepare. 
- """ - chat_tool_mode = chat_options.tool_choice - if chat_tool_mode is None or chat_tool_mode == ToolMode.NONE or chat_tool_mode == "none": - chat_options.tools = None - chat_options.tool_choice = ToolMode.NONE - return - - chat_options.tool_choice = chat_tool_mode - async def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: Mapping[str, Any], **kwargs: Any, ) -> tuple[dict[str, Any], list[FunctionResultContent | FunctionApprovalResponseContent] | None]: agent_definition = await self._load_agent_definition_if_needed() - # Use to_dict with exclusions for properties handled separately - run_options: dict[str, Any] = chat_options.to_dict( - exclude={ - "type", - "instructions", # handled via messages - "tools", # handled separately - "tool_choice", # handled separately - "response_format", # handled separately - "additional_properties", # handled separately - "frequency_penalty", # not supported - "presence_penalty", # not supported - "user", # not supported - "stop", # not supported - "logit_bias", # not supported - "seed", # not supported - "store", # not supported - } - ) + # Build run_options from options dict, excluding specific keys + exclude_keys = { + "type", + "instructions", # handled via messages + "tools", # handled separately + "tool_choice", # handled separately + "response_format", # handled separately + "additional_properties", # handled separately + "frequency_penalty", # not supported + "presence_penalty", # not supported + "user", # not supported + "stop", # not supported + "logit_bias", # not supported + "seed", # not supported + "store", # not supported + } + run_options: dict[str, Any] = {k: v for k, v in options.items() if k not in exclude_keys and v is not None} # Translation between ChatOptions and Azure AI Agents API translations = { @@ -840,21 +919,31 @@ async def _prepare_options( # tools and tool_choice if tool_definitions := await self._prepare_tool_definitions_and_resources( - chat_options, agent_definition, run_options + options, agent_definition, run_options ): run_options["tools"] = tool_definitions - if tool_choice := self._prepare_tool_choice_mode(chat_options): + if tool_choice := self._prepare_tool_choice_mode(options): run_options["tool_choice"] = tool_choice # response format - if chat_options.response_format is not None: - run_options["response_format"] = ResponseFormatJsonSchemaType( - json_schema=ResponseFormatJsonSchema( - name=chat_options.response_format.__name__, - schema=chat_options.response_format.model_json_schema(), + response_format = options.get("response_format") + if response_format is not None: + if isinstance(response_format, type) and issubclass(response_format, BaseModel): + # Pydantic model - convert to Azure format + run_options["response_format"] = ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name=response_format.__name__, + schema=response_format.model_json_schema(), + ) + ) + elif isinstance(response_format, Mapping): + # Runtime JSON schema dict - pass through as-is + run_options["response_format"] = response_format + else: + raise ServiceInvalidRequestError( + "response_format must be a Pydantic BaseModel class or a dict with runtime JSON schema." 
) - ) # messages additional_messages, instructions, required_action_results = self._prepare_messages(messages) @@ -873,41 +962,40 @@ async def _prepare_options( run_options["instructions"] = "\n".join(instructions) # thread_id resolution (conversation_id takes precedence, then kwargs, then instance default) - run_options["thread_id"] = chat_options.conversation_id or kwargs.get("conversation_id") or self.thread_id + run_options["thread_id"] = options.get("conversation_id") or kwargs.get("conversation_id") or self.thread_id return run_options, required_action_results def _prepare_tool_choice_mode( - self, chat_options: ChatOptions + self, options: Mapping[str, Any] ) -> AgentsToolChoiceOptionMode | AgentsNamedToolChoice | None: """Prepare the tool choice mode for Azure AI Agents API.""" - if chat_options.tool_choice is None: + tool_choice = options.get("tool_choice") + if tool_choice is None: return None - if chat_options.tool_choice == "none": + if tool_choice == "none": return AgentsToolChoiceOptionMode.NONE - if chat_options.tool_choice == "auto": + if tool_choice == "auto": return AgentsToolChoiceOptionMode.AUTO - if ( - isinstance(chat_options.tool_choice, ToolMode) - and chat_options.tool_choice == "required" - and chat_options.tool_choice.required_function_name is not None - ): - return AgentsNamedToolChoice( - type=AgentsNamedToolChoiceType.FUNCTION, - function=FunctionName(name=chat_options.tool_choice.required_function_name), - ) + if isinstance(tool_choice, Mapping) and tool_choice.get("mode") == "required": + req_fn = tool_choice.get("required_function_name") + if req_fn: + return AgentsNamedToolChoice( + type=AgentsNamedToolChoiceType.FUNCTION, + function=FunctionName(name=str(req_fn)), + ) return None async def _prepare_tool_definitions_and_resources( self, - chat_options: ChatOptions, + options: Mapping[str, Any], agent_definition: Agent | None, run_options: dict[str, Any], ) -> list[ToolDefinition | dict[str, Any]]: """Prepare tool definitions and resources for the run options.""" tool_definitions: list[ToolDefinition | dict[str, Any]] = [] - # Add tools from existing agent (exclude function tools - passed via chat_options.tools) + # Add tools from existing agent (exclude function tools - passed via options.get("tools")) if agent_definition is not None: agent_tools = [tool for tool in agent_definition.tools if not isinstance(tool, FunctionToolDefinition)] if agent_tools: @@ -916,11 +1004,13 @@ async def _prepare_tool_definitions_and_resources( run_options["tool_resources"] = agent_definition.tool_resources # Add run tools if tool_choice allows - if chat_options.tool_choice is not None and chat_options.tool_choice != "none" and chat_options.tools: - tool_definitions.extend(await self._prepare_tools_for_azure_ai(chat_options.tools, run_options)) + tool_choice = options.get("tool_choice") + tools = options.get("tools") + if tool_choice is not None and tool_choice != "none" and tools: + tool_definitions.extend(await self._prepare_tools_for_azure_ai(tools, run_options)) # Handle MCP tool resources - mcp_resources = self._prepare_mcp_resources(chat_options.tools) + mcp_resources = self._prepare_mcp_resources(tools) if mcp_resources: if "tool_resources" not in run_options: run_options["tool_resources"] = {} diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index 0c7b9c9782..6395993cd7 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ 
b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -2,12 +2,11 @@ import sys from collections.abc import Mapping, MutableSequence -from typing import Any, ClassVar, TypeVar, cast +from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypedDict, cast from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, ChatMessage, - ChatOptions, HostedMCPTool, TextContent, get_logger, @@ -32,27 +31,37 @@ from ._shared import AzureAISettings -if sys.version_info >= (3, 11): - from typing import Self # pragma: no cover -else: - from typing_extensions import Self # pragma: no cover +if TYPE_CHECKING: + from agent_framework.openai import OpenAIResponsesOptions +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore[import] # pragma: no cover +if sys.version_info >= (3, 11): + from typing import Self # pragma: no cover +else: + from typing_extensions import Self # pragma: no cover logger = get_logger("agent_framework.azure") - -TAzureAIClient = TypeVar("TAzureAIClient", bound="AzureAIClient") +TAzureAIClientOptions = TypeVar( + "TAzureAIClientOptions", + bound=TypedDict, # type: ignore[valid-type] + default="OpenAIResponsesOptions", + covariant=True, +) @use_function_invocation @use_instrumentation @use_chat_middleware -class AzureAIClient(OpenAIBaseResponsesClient): +class AzureAIClient(OpenAIBaseResponsesClient[TAzureAIClientOptions], Generic[TAzureAIClientOptions]): """Azure AI Agent client.""" OTEL_PROVIDER_NAME: ClassVar[str] = "azure.ai" # type: ignore[reportIncompatibleVariableOverride, misc] @@ -115,6 +124,18 @@ def __init__( # Or loading from a .env file client = AzureAIClient(credential=credential, env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework import ChatOptions + + + class MyOptions(ChatOptions, total=False): + my_custom_option: str + + + client: AzureAIClient[MyOptions] = AzureAIClient(credential=credential) + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: azure_ai_settings = AzureAISettings( @@ -266,7 +287,7 @@ async def close(self) -> None: await self._close_client_if_needed() def _create_text_format_config( - self, response_format: Any + self, response_format: type[BaseModel] | Mapping[str, Any] ) -> ( ResponseTextFormatConfigurationJsonSchema | ResponseTextFormatConfigurationJsonObject @@ -274,18 +295,25 @@ def _create_text_format_config( ): """Convert response_format into Azure text format configuration.""" if isinstance(response_format, type) and issubclass(response_format, BaseModel): + schema = response_format.model_json_schema() + # Ensure additionalProperties is explicitly false to satisfy Azure validation + if isinstance(schema, dict): + schema.setdefault("additionalProperties", False) return ResponseTextFormatConfigurationJsonSchema( name=response_format.__name__, - schema=response_format.model_json_schema(), + schema=schema, ) if isinstance(response_format, Mapping): format_config = self._convert_response_format(response_format) format_type = format_config.get("type") if format_type == "json_schema": + # Ensure schema includes additionalProperties=False to satisfy Azure validation + schema = dict(format_config.get("schema", {})) # type: ignore[assignment] + 
schema.setdefault("additionalProperties", False) config_kwargs: dict[str, Any] = { "name": format_config.get("name") or "response", - "schema": format_config["schema"], + "schema": schema, } if "strict" in format_config: config_kwargs["strict"] = format_config["strict"] @@ -303,7 +331,7 @@ async def _get_agent_reference_or_create( self, run_options: dict[str, Any], messages_instructions: str | None, - chat_options: ChatOptions | None = None, + chat_options: Mapping[str, Any] | None = None, ) -> dict[str, str]: """Determine which agent to use and create if needed. @@ -315,11 +343,6 @@ async def _get_agent_reference_or_create( Returns: dict[str, str]: The agent reference to use. """ - # chat_options is needed separately because the base class excludes response_format - # from run_options (transforming it to text/text_format for OpenAI). Azure's agent - # creation API requires the original response_format to build its own config format. - if chat_options is None: - chat_options = ChatOptions() # Agent name must be explicitly provided by the user. if self.agent_name is None: raise ServiceInitializationError( @@ -356,12 +379,7 @@ async def _get_agent_reference_or_create( # response_format is accessed from chat_options or additional_properties # since the base class excludes it from run_options - response_format: Any = ( - chat_options.response_format - if chat_options.response_format is not None - else chat_options.additional_properties.get("response_format") - ) - if response_format: + if chat_options and (response_format := chat_options.get("response_format")): args["text"] = PromptAgentDefinitionText(format=self._create_text_format_config(response_format)) # Combine instructions from messages and options @@ -392,12 +410,12 @@ async def _close_client_if_needed(self) -> None: async def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> dict[str, Any]: """Take ChatOptions and create the specific options for Azure AI.""" prepared_messages, instructions = self._prepare_messages_for_azure_ai(messages) - run_options = await super()._prepare_options(prepared_messages, chat_options, **kwargs) + run_options = await super()._prepare_options(prepared_messages, options, **kwargs) # WORKAROUND: Azure AI Projects 'create responses' API has schema divergence from OpenAI's # Responses API. Azure requires 'type' at item level and 'annotations' in content items. @@ -409,12 +427,20 @@ async def _prepare_options( if not self._is_application_endpoint: # Application-scoped response APIs do not support "agent" property. 
- agent_reference = await self._get_agent_reference_or_create(run_options, instructions, chat_options) + agent_reference = await self._get_agent_reference_or_create(run_options, instructions, options) run_options["extra_body"] = {"agent": agent_reference} # Remove properties that are not supported on request level # but were configured on agent level - exclude = ["model", "tools", "response_format", "temperature", "top_p", "text", "text_format"] + exclude = [ + "model", + "tools", + "response_format", + "temperature", + "top_p", + "text", + "text_format", + ] for property in exclude: run_options.pop(property, None) @@ -467,9 +493,9 @@ def _transform_input_for_azure_ai(self, input_items: list[dict[str, Any]]) -> li return transformed @override - def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: + def _get_current_conversation_id(self, options: dict[str, Any], **kwargs: Any) -> str | None: """Get the current conversation ID from chat options or kwargs.""" - return chat_options.conversation_id or kwargs.get("conversation_id") or self.conversation_id + return options.get("conversation_id") or kwargs.get("conversation_id") or self.conversation_id def _prepare_messages_for_azure_ai( self, messages: MutableSequence[ChatMessage] diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index 134a3586b0..87e52e7b32 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -31,7 +31,6 @@ HostedWebSearchTool, Role, TextContent, - ToolMode, UriContent, ) from agent_framework._serialization import SerializationMixin @@ -197,34 +196,6 @@ def test_azure_ai_chat_client_init_missing_model_deployment_for_agent_creation() ) -def test_azure_ai_chat_client_from_dict(mock_agents_client: MagicMock) -> None: - """Test AzureAIAgentClient.from_dict method.""" - settings = { - "agents_client": mock_agents_client, - "agent_id": "test-agent-id", - "thread_id": "test-thread-id", - "project_endpoint": "https://test-endpoint.com/", - "model_deployment_name": "test-model", - "agent_name": "TestAgent", - } - - azure_ai_settings = AzureAISettings( - project_endpoint=settings["project_endpoint"], - model_deployment_name=settings["model_deployment_name"], - ) - - chat_client: AzureAIAgentClient = create_test_azure_ai_chat_client( - mock_agents_client, - agent_id=settings["agent_id"], # type: ignore - thread_id=settings["thread_id"], # type: ignore - azure_ai_settings=azure_ai_settings, - ) - - assert chat_client.agents_client is mock_agents_client - assert chat_client.agent_id == "test-agent-id" - assert chat_client.thread_id == "test-thread-id" - - def test_azure_ai_chat_client_init_missing_credential(azure_ai_unit_test_env: dict[str, str]) -> None: """Test AzureAIAgentClient.__init__ when credential is missing and no agents_client provided.""" with pytest.raises( @@ -253,7 +224,7 @@ def test_azure_ai_chat_client_init_validation_error(mock_azure_credential: Magic ) -def test_azure_ai_chat_client_from_settings() -> None: +def test_azure_ai_chat_client_from_dict() -> None: """Test from_settings class method.""" mock_agents_client = MagicMock() settings = { @@ -265,7 +236,7 @@ def test_azure_ai_chat_client_from_settings() -> None: "agent_name": "TestAgent", } - client = AzureAIAgentClient.from_settings(settings) + client = AzureAIAgentClient.from_dict(settings) assert client.agents_client is mock_agents_client assert 
client.agent_id == "test-agent" @@ -372,7 +343,7 @@ async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: Ma chat_client = create_test_azure_ai_chat_client(mock_agents_client) messages = [ChatMessage(role=Role.USER, text="Hello")] - chat_options = ChatOptions(max_tokens=100, temperature=0.7) + chat_options: ChatOptions = {"max_tokens": 100, "temperature": 0.7} run_options, tool_results = await chat_client._prepare_options(messages, chat_options) # type: ignore @@ -386,7 +357,7 @@ async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_ messages = [ChatMessage(role=Role.USER, text="Hello")] - run_options, tool_results = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore + run_options, tool_results = await chat_client._prepare_options(messages, {}) # type: ignore assert run_options is not None assert tool_results is None @@ -403,7 +374,7 @@ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agen image_content = UriContent(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [ChatMessage(role=Role.USER, contents=[image_content])] - run_options, _ = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore assert "additional_messages" in run_options assert len(run_options["additional_messages"]) == 1 @@ -494,7 +465,7 @@ async def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_cl ChatMessage(role=Role.USER, text="Hello"), ] - run_options, _ = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore assert "instructions" in run_options assert "You are a helpful assistant" in run_options["instructions"] @@ -506,7 +477,7 @@ async def test_azure_ai_chat_client_inner_get_response(mock_agents_client: Magic """Test _inner_get_response method.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") messages = [ChatMessage(role=Role.USER, text="Hello")] - chat_options = ChatOptions() + chat_options: ChatOptions = {} async def mock_streaming_response(): yield ChatResponseUpdate(role=Role.ASSISTANT, text="Hello back") @@ -518,7 +489,7 @@ async def mock_streaming_response(): mock_response = ChatResponse(role=Role.ASSISTANT, text="Hello back") mock_from_generator.return_value = mock_response - result = await chat_client._inner_get_response(messages=messages, chat_options=chat_options) # type: ignore + result = await chat_client._inner_get_response(messages=messages, options=chat_options) # type: ignore assert result is mock_response mock_from_generator.assert_called_once() @@ -627,8 +598,7 @@ async def test_azure_ai_chat_client_prepare_options_with_none_tool_choice( """Test _prepare_options with tool_choice set to 'none'.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_options = ChatOptions() - chat_options.tool_choice = "none" + chat_options: ChatOptions = {"tool_choice": "none"} run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore @@ -643,8 +613,7 @@ async def test_azure_ai_chat_client_prepare_options_with_auto_tool_choice( """Test _prepare_options with tool_choice set to 'auto'.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_options = ChatOptions() - chat_options.tool_choice = "auto" + chat_options = {"tool_choice": "auto"} run_options, _ = await 
chat_client._prepare_options([], chat_options) # type: ignore @@ -653,35 +622,17 @@ async def test_azure_ai_chat_client_prepare_options_with_auto_tool_choice( assert run_options["tool_choice"] == AgentsToolChoiceOptionMode.AUTO -async def test_azure_ai_chat_client_prepare_tool_choice_none_string( - mock_agents_client: MagicMock, -) -> None: - """Test _prepare_tool_choice when tool_choice is string 'none'.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) - - # Create a mock tool for testing - mock_tool = MagicMock() - chat_options = ChatOptions(tools=[mock_tool], tool_choice="none") - - # Call the method - chat_client._prepare_tool_choice(chat_options) # type: ignore - - # Verify tools are cleared and tool_choice is set to NONE mode - assert chat_options.tools is None - assert chat_options.tool_choice == ToolMode.NONE.mode - - async def test_azure_ai_chat_client_prepare_options_tool_choice_required_specific_function( mock_agents_client: MagicMock, ) -> None: - """Test _prepare_options with ToolMode.REQUIRED specifying a specific function name.""" + """Test _prepare_options with required tool_choice specifying a specific function name.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - required_tool_mode = ToolMode.REQUIRED("specific_function_name") + required_tool_mode = {"mode": "required", "required_function_name": "specific_function_name"} dict_tool = {"type": "function", "function": {"name": "test_function"}} - chat_options = ChatOptions(tools=[dict_tool], tool_choice=required_tool_mode) + chat_options = {"tools": [dict_tool], "tool_choice": required_tool_mode} messages = [ChatMessage(role=Role.USER, text="Hello")] run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore @@ -703,8 +654,7 @@ async def test_azure_ai_chat_client_prepare_options_with_response_format( class TestResponseModel(BaseModel): name: str = Field(description="Test name") - chat_options = ChatOptions() - chat_options.response_format = TestResponseModel + chat_options: ChatOptions = {"response_format": TestResponseModel} run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore @@ -783,7 +733,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agent mcp_tool = HostedMCPTool(name="Test MCP Tool", url="https://example.com/mcp", approval_mode="never_require") messages = [ChatMessage(role=Role.USER, text="Hello")] - chat_options = ChatOptions(tools=[mcp_tool], tool_choice="auto") + chat_options: ChatOptions = {"tools": [mcp_tool], "tool_choice": "auto"} with patch("agent_framework_azure_ai._chat_client.McpTool") as mock_mcp_tool_class: # Mock _prepare_tools_for_azure_ai to avoid actual tool preparation @@ -816,7 +766,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents ) messages = [ChatMessage(role=Role.USER, text="Hello")] - chat_options = ChatOptions(tools=[mcp_tool], tool_choice="auto") + chat_options: ChatOptions = {"tools": [mcp_tool], "tool_choice": "auto"} with patch("agent_framework_azure_ai._chat_client.McpTool") as mock_mcp_tool_class: # Mock _prepare_tools_for_azure_ai to avoid actual tool preparation @@ -1518,8 +1468,7 @@ async def test_azure_ai_chat_client_get_response_tools() -> None: # Test that the agents_client can be used to get a response response = await azure_ai_chat_client.get_response( messages=messages, - tools=[get_weather], - tool_choice="auto", + options={"tools": [get_weather], "tool_choice": "auto"}, ) assert response is not None 
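The hunks above and below migrate per-call settings from individual keyword arguments into a single `options` TypedDict. As a minimal sketch of the call shapes these tests converge on (the `azure_ai_chat_client`, `messages`, and `get_weather` names are assumed from the surrounding test module, not defined here):

```python
# Sketch only: mirrors the migrated call sites in these tests.
response = await azure_ai_chat_client.get_response(
    messages=messages,
    options={"tools": [get_weather], "tool_choice": "auto"},
)

# tool_choice also accepts the dict form used by the _prepare_options tests,
# pinning the model to one specific function:
response = await azure_ai_chat_client.get_response(
    messages=messages,
    options={
        "tools": [get_weather],
        "tool_choice": {"mode": "required", "required_function_name": "get_weather"},
    },
)
```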
@@ -1571,8 +1520,7 @@ async def test_azure_ai_chat_client_streaming_tools() -> None: # Test that the agents_client can be used to get a response response = azure_ai_chat_client.get_streaming_response( messages=messages, - tools=[get_weather], - tool_choice="auto", + options={"tools": [get_weather], "tool_choice": "auto"}, ) full_message: str = "" async for chunk in response: @@ -1772,7 +1720,7 @@ async def test_azure_ai_chat_client_agent_hosted_mcp_tool() -> None: ) as agent: response = await agent.run( "How to create an Azure storage account using az cli?", - max_tokens=200, + options={"max_tokens": 200}, ) assert isinstance(response, AgentRunResponse) @@ -1823,20 +1771,14 @@ async def test_azure_ai_chat_client_agent_chat_options_run_level() -> None: ) as agent: response = await agent.run( "Provide a brief, helpful response.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, - user="comprehensive-test-user", tools=[get_weather], - tool_choice="auto", - frequency_penalty=0.1, - presence_penalty=0.1, - stop=["END"], - store=True, - logit_bias={"test": 1}, - metadata={"test": "value"}, - additional_properties={"custom_param": "test_value"}, + options={ + "max_tokens": 100, + "temperature": 0.7, + "top_p": 0.9, + "tool_choice": "auto", + "metadata": {"test": "value"}, + }, ) assert isinstance(response, AgentRunResponse) @@ -1850,20 +1792,14 @@ async def test_azure_ai_chat_client_agent_chat_options_agent_level() -> None: async with ChatAgent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, - user="comprehensive-test-user", tools=[get_weather], - tool_choice="auto", - frequency_penalty=0.1, - presence_penalty=0.1, - stop=["END"], - store=True, - logit_bias={"test": 1}, - metadata={"test": "value"}, - request_kwargs={"custom_param": "test_value"}, + default_options={ + "max_tokens": 100, + "temperature": 0.7, + "top_p": 0.9, + "tool_choice": "auto", + "metadata": {"test": "value"}, + }, ) as agent: response = await agent.run( "Provide a brief, helpful response.", diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index 3b1b500ede..57180715e1 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -1,19 +1,24 @@ # Copyright (c) Microsoft. All rights reserved. 
+import json import os -from collections.abc import AsyncIterator +from collections.abc import AsyncGenerator, AsyncIterator from contextlib import asynccontextmanager -from typing import Annotated +from typing import Annotated, Any from unittest.mock import AsyncMock, MagicMock, patch +from uuid import uuid4 import pytest from agent_framework import ( AgentRunResponse, - AgentRunResponseUpdate, ChatAgent, ChatClientProtocol, ChatMessage, ChatOptions, + ChatResponse, + HostedCodeInterpreterTool, + HostedMCPTool, + HostedWebSearchTool, Role, TextContent, ) @@ -26,6 +31,7 @@ from openai.types.responses.parsed_response import ParsedResponse from openai.types.responses.response import Response as OpenAIResponse from pydantic import BaseModel, ConfigDict, Field, ValidationError +from pytest import fixture, param from agent_framework_azure_ai import AzureAIClient, AzureAISettings @@ -41,6 +47,32 @@ ) +@pytest.fixture +def mock_project_client() -> MagicMock: + """Fixture that provides a mock AIProjectClient.""" + mock_client = MagicMock() + + # Mock agents property + mock_client.agents = MagicMock() + mock_client.agents.create_version = AsyncMock() + + # Mock conversations property + mock_client.conversations = MagicMock() + mock_client.conversations.create = AsyncMock() + + # Mock telemetry property + mock_client.telemetry = MagicMock() + mock_client.telemetry.get_application_insights_connection_string = AsyncMock() + + # Mock get_openai_client method + mock_client.get_openai_client = AsyncMock() + + # Mock close method + mock_client.close = AsyncMock() + + return mock_client + + @asynccontextmanager async def temporary_chat_client(agent_name: str) -> AsyncIterator[AzureAIClient]: """Async context manager that creates an Azure AI agent and yields an `AzureAIClient`. 
@@ -121,7 +153,7 @@ def test_azure_ai_settings_init_with_explicit_values() -> None: assert settings.model_deployment_name == "custom-model" -def test_azure_ai_client_init_with_project_client(mock_project_client: MagicMock) -> None: +def test_init_with_project_client(mock_project_client: MagicMock) -> None: """Test AzureAIClient initialization with existing project_client.""" with patch("agent_framework_azure_ai._client.AzureAISettings") as mock_settings: mock_settings.return_value.project_endpoint = None @@ -140,7 +172,7 @@ def test_azure_ai_client_init_with_project_client(mock_project_client: MagicMock assert isinstance(client, ChatClientProtocol) -def test_azure_ai_client_init_auto_create_client( +def test_init_auto_create_client( azure_ai_unit_test_env: dict[str, str], mock_azure_credential: MagicMock, ) -> None: @@ -164,7 +196,7 @@ def test_azure_ai_client_init_auto_create_client( mock_ai_project_client.assert_called_once() -def test_azure_ai_client_init_missing_project_endpoint() -> None: +def test_init_missing_project_endpoint() -> None: """Test AzureAIClient initialization when project_endpoint is missing and no project_client provided.""" with patch("agent_framework_azure_ai._client.AzureAISettings") as mock_settings: mock_settings.return_value.project_endpoint = None @@ -174,7 +206,7 @@ def test_azure_ai_client_init_missing_project_endpoint() -> None: AzureAIClient(credential=MagicMock()) -def test_azure_ai_client_init_missing_credential(azure_ai_unit_test_env: dict[str, str]) -> None: +def test_init_missing_credential(azure_ai_unit_test_env: dict[str, str]) -> None: """Test AzureAIClient.__init__ when credential is missing and no project_client provided.""" with pytest.raises( ServiceInitializationError, match="Azure credential is required when project_client is not provided" @@ -185,7 +217,7 @@ def test_azure_ai_client_init_missing_credential(azure_ai_unit_test_env: dict[st ) -def test_azure_ai_client_init_validation_error(mock_azure_credential: MagicMock) -> None: +def test_init_validation_error(mock_azure_credential: MagicMock) -> None: """Test that ValidationError in AzureAISettings is properly handled.""" with patch("agent_framework_azure_ai._client.AzureAISettings") as mock_settings: mock_settings.side_effect = ValidationError.from_exception_data("test", []) @@ -194,7 +226,7 @@ def test_azure_ai_client_init_validation_error(mock_azure_credential: MagicMock) AzureAIClient(credential=mock_azure_credential) -async def test_azure_ai_client_get_agent_reference_or_create_existing_version( +async def test_get_agent_reference_or_create_existing_version( mock_project_client: MagicMock, ) -> None: """Test _get_agent_reference_or_create when agent_version is already provided.""" @@ -205,7 +237,7 @@ async def test_azure_ai_client_get_agent_reference_or_create_existing_version( assert agent_ref == {"name": "existing-agent", "version": "1.0", "type": "agent_reference"} -async def test_azure_ai_client_get_agent_reference_or_create_missing_agent_name( +async def test_get_agent_reference_or_create_missing_agent_name( mock_project_client: MagicMock, ) -> None: """Test _get_agent_reference_or_create raises when agent_name is missing.""" @@ -215,7 +247,7 @@ async def test_azure_ai_client_get_agent_reference_or_create_missing_agent_name( await client._get_agent_reference_or_create({}, None) # type: ignore -async def test_azure_ai_client_get_agent_reference_or_create_new_agent( +async def test_get_agent_reference_or_create_new_agent( mock_project_client: MagicMock, azure_ai_unit_test_env: 
dict[str, str], ) -> None: @@ -239,7 +271,7 @@ async def test_azure_ai_client_get_agent_reference_or_create_new_agent( assert client.agent_version == "1.0" -async def test_azure_ai_client_get_agent_reference_missing_model( +async def test_get_agent_reference_missing_model( mock_project_client: MagicMock, ) -> None: """Test _get_agent_reference_or_create when model is missing for agent creation.""" @@ -249,7 +281,7 @@ async def test_azure_ai_client_get_agent_reference_missing_model( await client._get_agent_reference_or_create({}, None) # type: ignore -async def test_azure_ai_client_prepare_messages_for_azure_ai_with_system_messages( +async def test_prepare_messages_for_azure_ai_with_system_messages( mock_project_client: MagicMock, ) -> None: """Test _prepare_messages_for_azure_ai converts system/developer messages to instructions.""" @@ -269,7 +301,7 @@ async def test_azure_ai_client_prepare_messages_for_azure_ai_with_system_message assert instructions == "You are a helpful assistant." -async def test_azure_ai_client_prepare_messages_for_azure_ai_no_system_messages( +async def test_prepare_messages_for_azure_ai_no_system_messages( mock_project_client: MagicMock, ) -> None: """Test _prepare_messages_for_azure_ai with no system/developer messages.""" @@ -286,7 +318,7 @@ async def test_azure_ai_client_prepare_messages_for_azure_ai_no_system_messages( assert instructions is None -def test_azure_ai_client_transform_input_for_azure_ai(mock_project_client: MagicMock) -> None: +def test_transform_input_for_azure_ai(mock_project_client: MagicMock) -> None: """Test _transform_input_for_azure_ai adds required fields for Azure AI schema. WORKAROUND TEST: Azure AI Projects API requires 'type' at item level and @@ -331,7 +363,7 @@ def test_azure_ai_client_transform_input_for_azure_ai(mock_project_client: Magic assert result[1]["content"][0]["text"] == "Hi there!" 
-def test_azure_ai_client_transform_input_preserves_existing_fields(mock_project_client: MagicMock) -> None: +def test_transform_input_preserves_existing_fields(mock_project_client: MagicMock) -> None: """Test _transform_input_for_azure_ai preserves existing type and annotations.""" client = create_test_azure_ai_client(mock_project_client) @@ -353,7 +385,7 @@ def test_azure_ai_client_transform_input_preserves_existing_fields(mock_project_ assert result[0]["content"][0]["annotations"] == [{"some": "annotation"}] -def test_azure_ai_client_transform_input_handles_non_dict_content(mock_project_client: MagicMock) -> None: +def test_transform_input_handles_non_dict_content(mock_project_client: MagicMock) -> None: """Test _transform_input_for_azure_ai handles non-dict content items.""" client = create_test_azure_ai_client(mock_project_client) @@ -373,12 +405,11 @@ def test_azure_ai_client_transform_input_handles_non_dict_content(mock_project_c assert result[0]["content"] == ["plain string content"] -async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicMock) -> None: +async def test_prepare_options_basic(mock_project_client: MagicMock) -> None: """Test prepare_options basic functionality.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions() with ( patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), @@ -388,7 +419,7 @@ async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicM return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, ), ): - run_options = await client._prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, {}) assert "extra_body" in run_options assert run_options["extra_body"]["agent"]["name"] == "test-agent" @@ -401,7 +432,7 @@ async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicM ("https://example.com/api/projects/my-project", True), ], ) -async def test_azure_ai_client_prepare_options_with_application_endpoint( +async def test_prepare_options_with_application_endpoint( mock_azure_credential: MagicMock, endpoint: str, expects_agent: bool ) -> None: client = AzureAIClient( @@ -413,7 +444,6 @@ async def test_azure_ai_client_prepare_options_with_application_endpoint( ) messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions() with ( patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), @@ -423,7 +453,7 @@ async def test_azure_ai_client_prepare_options_with_application_endpoint( return_value={"name": "test-agent", "version": "1", "type": "agent_reference"}, ), ): - run_options = await client._prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, {}) if expects_agent: assert "extra_body" in run_options @@ -439,7 +469,7 @@ async def test_azure_ai_client_prepare_options_with_application_endpoint( ("https://example.com/api/projects/my-project", True), ], ) -async def test_azure_ai_client_prepare_options_with_application_project_client( +async def test_prepare_options_with_application_project_client( mock_project_client: MagicMock, endpoint: str, expects_agent: bool ) -> None: mock_project_client._config = MagicMock() @@ -453,7 +483,6 @@ async def 
test_azure_ai_client_prepare_options_with_application_project_client( ) messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions() with ( patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), @@ -463,7 +492,7 @@ async def test_azure_ai_client_prepare_options_with_application_project_client( return_value={"name": "test-agent", "version": "1", "type": "agent_reference"}, ), ): - run_options = await client._prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, {}) if expects_agent: assert "extra_body" in run_options @@ -472,7 +501,7 @@ async def test_azure_ai_client_prepare_options_with_application_project_client( assert "extra_body" not in run_options -async def test_azure_ai_client_initialize_client(mock_project_client: MagicMock) -> None: +async def test_initialize_client(mock_project_client: MagicMock) -> None: """Test _initialize_client method.""" client = create_test_azure_ai_client(mock_project_client) @@ -485,7 +514,7 @@ async def test_azure_ai_client_initialize_client(mock_project_client: MagicMock) mock_project_client.get_openai_client.assert_called_once() -def test_azure_ai_client_update_agent_name_and_description(mock_project_client: MagicMock) -> None: +def test_update_agent_name_and_description(mock_project_client: MagicMock) -> None: """Test _update_agent_name_and_description method.""" client = create_test_azure_ai_client(mock_project_client) @@ -506,7 +535,7 @@ def test_azure_ai_client_update_agent_name_and_description(mock_project_client: mock_update.assert_called_once_with(None) -async def test_azure_ai_client_async_context_manager(mock_project_client: MagicMock) -> None: +async def test_async_context_manager(mock_project_client: MagicMock) -> None: """Test async context manager functionality.""" client = create_test_azure_ai_client(mock_project_client, should_close_client=True) @@ -519,7 +548,7 @@ async def test_azure_ai_client_async_context_manager(mock_project_client: MagicM mock_project_client.close.assert_called_once() -async def test_azure_ai_client_close_method(mock_project_client: MagicMock) -> None: +async def test_close_method(mock_project_client: MagicMock) -> None: """Test close method.""" client = create_test_azure_ai_client(mock_project_client, should_close_client=True) @@ -530,7 +559,7 @@ async def test_azure_ai_client_close_method(mock_project_client: MagicMock) -> N mock_project_client.close.assert_called_once() -async def test_azure_ai_client_close_client_when_should_close_false(mock_project_client: MagicMock) -> None: +async def test_close_client_when_should_close_false(mock_project_client: MagicMock) -> None: """Test _close_client_if_needed when should_close_client is False.""" client = create_test_azure_ai_client(mock_project_client, should_close_client=False) @@ -542,7 +571,7 @@ async def test_azure_ai_client_close_client_when_should_close_false(mock_project mock_project_client.close.assert_not_called() -async def test_azure_ai_client_agent_creation_with_instructions( +async def test_agent_creation_with_instructions( mock_project_client: MagicMock, ) -> None: """Test agent creation with combined instructions.""" @@ -564,7 +593,7 @@ async def test_azure_ai_client_agent_creation_with_instructions( assert call_args[1]["definition"].instructions == "Message instructions. Option instructions. 
" -async def test_azure_ai_client_agent_creation_with_additional_args( +async def test_agent_creation_with_additional_args( mock_project_client: MagicMock, ) -> None: """Test agent creation with additional arguments.""" @@ -588,7 +617,7 @@ async def test_azure_ai_client_agent_creation_with_additional_args( assert definition.top_p == 0.8 -async def test_azure_ai_client_agent_creation_with_tools( +async def test_agent_creation_with_tools( mock_project_client: MagicMock, ) -> None: """Test agent creation with tools.""" @@ -610,7 +639,7 @@ async def test_azure_ai_client_agent_creation_with_tools( assert call_args[1]["definition"].tools == test_tools -async def test_azure_ai_client_use_latest_version_existing_agent( +async def test_use_latest_version_existing_agent( mock_project_client: MagicMock, ) -> None: """Test _get_agent_reference_or_create when use_latest_version=True and agent exists.""" @@ -634,7 +663,7 @@ async def test_azure_ai_client_use_latest_version_existing_agent( assert client.agent_version == "2.5" -async def test_azure_ai_client_use_latest_version_agent_not_found( +async def test_use_latest_version_agent_not_found( mock_project_client: MagicMock, ) -> None: """Test _get_agent_reference_or_create when use_latest_version=True but agent doesn't exist.""" @@ -663,7 +692,7 @@ async def test_azure_ai_client_use_latest_version_agent_not_found( assert client.agent_version == "1.0" -async def test_azure_ai_client_use_latest_version_false( +async def test_use_latest_version_false( mock_project_client: MagicMock, ) -> None: """Test _get_agent_reference_or_create when use_latest_version=False (default behavior).""" @@ -685,7 +714,7 @@ async def test_azure_ai_client_use_latest_version_false( assert agent_ref == {"name": "test-agent", "version": "1.0", "type": "agent_reference"} -async def test_azure_ai_client_use_latest_version_with_existing_agent_version( +async def test_use_latest_version_with_existing_agent_version( mock_project_client: MagicMock, ) -> None: """Test that use_latest_version is ignored when agent_version is already provided.""" @@ -711,7 +740,7 @@ class ResponseFormatModel(BaseModel): model_config = ConfigDict(extra="forbid") -async def test_azure_ai_client_agent_creation_with_response_format( +async def test_agent_creation_with_response_format( mock_project_client: MagicMock, ) -> None: """Test agent creation with response_format configuration.""" @@ -724,7 +753,7 @@ async def test_azure_ai_client_agent_creation_with_response_format( mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent) run_options = {"model": "test-model"} - chat_options = ChatOptions(response_format=ResponseFormatModel) + chat_options = {"response_format": ResponseFormatModel} await client._get_agent_reference_or_create(run_options, None, chat_options) # type: ignore @@ -751,9 +780,10 @@ async def test_azure_ai_client_agent_creation_with_response_format( assert "name" in schema["properties"] assert "value" in schema["properties"] assert "description" in schema["properties"] + assert "additionalProperties" in schema -async def test_azure_ai_client_agent_creation_with_mapping_response_format( +async def test_agent_creation_with_mapping_response_format( mock_project_client: MagicMock, ) -> None: """Test agent creation when response_format is provided as a mapping.""" @@ -786,9 +816,9 @@ async def test_azure_ai_client_agent_creation_with_mapping_response_format( "schema": runtime_schema, }, } - chat_options = ChatOptions(response_format=response_format_mapping) # type: ignore + 
chat_options = {"response_format": response_format_mapping} - await client._get_agent_reference_or_create(run_options, None, chat_options) # type: ignore + await client._get_agent_reference_or_create(run_options, None, chat_options) call_args = mock_project_client.agents.create_version.call_args created_definition = call_args[1]["definition"] @@ -802,14 +832,14 @@ async def test_azure_ai_client_agent_creation_with_mapping_response_format( assert format_config.strict is True -async def test_azure_ai_client_prepare_options_excludes_response_format( +async def test_prepare_options_excludes_response_format( mock_project_client: MagicMock, ) -> None: """Test that prepare_options excludes response_format, text, and text_format from final run options.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions() + chat_options: ChatOptions = {} with ( patch.object( @@ -932,30 +962,7 @@ def test_get_conversation_id_with_parsed_response_no_conversation() -> None: assert result == "resp_parsed_12345" -@pytest.fixture -def mock_project_client() -> MagicMock: - """Fixture that provides a mock AIProjectClient.""" - mock_client = MagicMock() - - # Mock agents property - mock_client.agents = MagicMock() - mock_client.agents.create_version = AsyncMock() - - # Mock conversations property - mock_client.conversations = MagicMock() - mock_client.conversations.create = AsyncMock() - - # Mock telemetry property - mock_client.telemetry = MagicMock() - mock_client.telemetry.get_application_insights_connection_string = AsyncMock() - - # Mock get_openai_client method - mock_client.get_openai_client = AsyncMock() - - # Mock close method - mock_client.close = AsyncMock() - - return mock_client +# region Integration Tests def get_weather( @@ -965,143 +972,355 @@ def get_weather( return f"The weather in {location} is sunny with a high of 25°C." -@pytest.mark.flaky -@skip_if_azure_ai_integration_tests_disabled -async def test_azure_ai_chat_client_agent_basic_run() -> None: - """Test ChatAgent basic run functionality with AzureAIClient.""" +class OutputStruct(BaseModel): + """A structured output for testing purposes.""" + + location: str + weather: str + + +@fixture +async def client() -> AsyncGenerator[AzureAIClient, None]: + """Create a client to test with.""" + agent_name = f"test-agent-{uuid4()}" + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( - temporary_chat_client(agent_name="BasicRunAgent") as chat_client, - ChatAgent(chat_client=chat_client) as agent, + AzureCliCredential() as credential, + AIProjectClient(endpoint=endpoint, credential=credential) as project_client, ): - response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") - - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - assert "Hello World" in response.text + client = AzureAIClient( + project_client=project_client, + agent_name=agent_name, + ) + try: + client.function_invocation_configuration.max_iterations = 1 + yield client + finally: + await project_client.agents.delete(agent_name=agent_name) @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled -async def test_azure_ai_chat_client_agent_basic_run_streaming() -> None: - """Test ChatAgent basic streaming functionality with AzureAIClient.""" - async with ( - temporary_chat_client(agent_name="BasicRunStreamingAgent") as chat_client, - ChatAgent(chat_client=chat_client) as agent, - ): - full_message: str = "" - async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): - assert chunk is not None - assert isinstance(chunk, AgentRunResponseUpdate) - if chunk.text: - full_message += chunk.text +@pytest.mark.parametrize( + "option_name,option_value,needs_validation", + [ + # Simple ChatOptions - just verify they don't fail + param("top_p", 0.9, False, id="top_p"), + param("max_tokens", 500, False, id="max_tokens"), + param("seed", 123, False, id="seed"), + param("user", "test-user-id", False, id="user"), + param("metadata", {"test_key": "test_value"}, False, id="metadata"), + param("frequency_penalty", 0.5, False, id="frequency_penalty"), + param("presence_penalty", 0.3, False, id="presence_penalty"), + param("stop", ["END"], False, id="stop"), + param("allow_multiple_tool_calls", True, False, id="allow_multiple_tool_calls"), + param("tool_choice", "none", True, id="tool_choice_none"), + param("tool_choice", "auto", True, id="tool_choice_auto"), + param("tool_choice", "required", True, id="tool_choice_required_any"), + param( + "tool_choice", + {"mode": "required", "required_function_name": "get_weather"}, + True, + id="tool_choice_required", + ), + # OpenAIResponsesOptions - just verify they don't fail + param("safety_identifier", "user-hash-abc123", False, id="safety_identifier"), + param("truncation", "auto", False, id="truncation"), + param("top_logprobs", 5, False, id="top_logprobs"), + param("prompt_cache_key", "test-cache-key", False, id="prompt_cache_key"), + param("max_tool_calls", 3, False, id="max_tool_calls"), + ], +) +async def test_integration_options( + option_name: str, + option_value: Any, + needs_validation: bool, + client: AzureAIClient, +) -> None: + """Parametrized test covering options that can be set at runtime for a Foundry Agent. - # Validate streaming response - assert len(full_message) > 0 - assert "streaming response test" in full_message.lower() + Tests both streaming and non-streaming modes for each option to ensure + they don't cause failures. Options marked with needs_validation also + check that the feature actually works correctly. + + This test reuses a single agent. 
+ """ + # Prepare test message + if option_name.startswith("tool_choice"): + # Use weather-related prompt for tool tests + messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + else: + # Generic prompt for simple options + messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + + # Build options dict + options: dict[str, Any] = {option_name: option_value, "tools": [get_weather]} + + for streaming in [False, True]: + if streaming: + # Test streaming mode + response_gen = client.get_streaming_response( + messages=messages, + options=options, + ) + + output_format = option_value if option_name == "response_format" else None + response = await ChatResponse.from_chat_response_generator(response_gen, output_format_type=output_format) + else: + # Test non-streaming mode + response = await client.get_response( + messages=messages, + options=options, + ) + + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" + + # Validate based on option type + if needs_validation: + if option_name.startswith("tool_choice"): + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" + elif option_name == "response_format": + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." + response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled -async def test_azure_ai_chat_client_agent_with_tools() -> None: - """Test ChatAgent tools with AzureAIClient.""" - async with ( - temporary_chat_client(agent_name="RunToolsAgent") as chat_client, - ChatAgent(chat_client=chat_client, tools=[get_weather]) as agent, - ): - response = await agent.run("What's the weather like in Seattle?") +@pytest.mark.parametrize( + "option_name,option_value,needs_validation", + [ + param("temperature", 0.7, False, id="temperature"), + # Complex options requiring output validation + param("response_format", OutputStruct, True, id="response_format_pydantic"), + param( + "response_format", + { + "type": "json_schema", + "json_schema": { + "name": "WeatherDigest", + "strict": True, + "schema": { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + }, + }, + }, + True, + id="response_format_runtime_json_schema", + ), + ], +) +async def test_integration_agent_options( + option_name: str, + option_value: Any, + needs_validation: bool, +) -> None: + """Test Foundry agent level options in both streaming and non-streaming modes. 
-    # Validate response
-    assert isinstance(response, AgentRunResponse)
-    assert response.text is not None
-    assert len(response.text) > 0
-    assert any(word in response.text.lower() for word in ["sunny", "25"])
+    Tests both streaming and non-streaming modes for each option to ensure
+    they don't cause failures. Options marked with needs_validation also
+    check that the feature actually works correctly.
+    This test creates a new client and uses it for both streaming and non-streaming tests.
+    """
+    async with temporary_chat_client(agent_name=f"test-agent-{option_name.replace('_', '-')}-{uuid4()}") as client:
+        for streaming in [False, True]:
+            # Prepare test message
+            if option_name.startswith("response_format"):
+                # Use prompt that works well with structured output
+                messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")]
+                messages.append(ChatMessage(role="user", text="What is the weather in Seattle?"))
+            else:
+                # Generic prompt for simple options
+                messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")]
+
+            # Build options dict
+            options = {option_name: option_value}
+
+            if streaming:
+                # Test streaming mode
+                response_gen = client.get_streaming_response(
+                    messages=messages,
+                    options=options,
+                )
+
+                output_format = option_value if option_name.startswith("response_format") else None
+                response = await ChatResponse.from_chat_response_generator(
+                    response_gen, output_format_type=output_format
+                )
+            else:
+                # Test non-streaming mode
+                response = await client.get_response(
+                    messages=messages,
+                    options=options,
+                )
+
+            assert response is not None
+            assert isinstance(response, ChatResponse)
+            assert response.text is not None, f"No text in response for option '{option_name}'"
+            assert len(response.text) > 0, f"Empty response for option '{option_name}'"
+
+            # Validate based on option type
+            if needs_validation and option_name.startswith("response_format"):
+                if option_value == OutputStruct:
+                    # Should have structured output
+                    assert response.value is not None, "No structured output"
+                    assert isinstance(response.value, OutputStruct)
+                    assert "seattle" in response.value.location.lower()
+                else:
+                    # Runtime JSON schema
+                    assert response.value is None, "No structured output, can't parse any json."
+                    response_value = json.loads(response.text)
+                    assert isinstance(response_value, dict)
+                    assert "location" in response_value
+                    assert "seattle" in response_value["location"].lower()
-class ReleaseBrief(BaseModel):
-    """Structured output model for release brief."""
-    title: str = Field(description="A short title for the release.")
-    summary: str = Field(description="A brief summary of what was released.")
-    highlights: list[str] = Field(description="Key highlights from the release.")
-    model_config = ConfigDict(extra="forbid")
+@pytest.mark.flaky
+@skip_if_azure_ai_integration_tests_disabled
+async def test_integration_web_search() -> None:
+    async with temporary_chat_client(agent_name="af-int-test-web-search") as client:
+        for streaming in [False, True]:
+            content = {
+                "messages": "Who are the main characters of Kpop Demon Hunters? 
Do a web search to find the answer.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool()], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text + + # Test that the client will use the web search tool with location + additional_properties = { + "user_location": { + "country": "US", + "city": "Seattle", + } + } + content = { + "messages": "What is the current weather? Do not ask for my current location.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool(additional_properties=additional_properties)], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + assert response.text is not None @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled -async def test_azure_ai_chat_client_agent_with_response_format() -> None: - """Test ChatAgent with response_format (structured output) using AzureAIClient.""" - async with ( - temporary_chat_client(agent_name="ResponseFormatAgent") as chat_client, - ChatAgent(chat_client=chat_client) as agent, - ): - response = await agent.run( - "Summarize the following release notes into a ReleaseBrief:\n\n" - "Version 2.0 Release Notes:\n" - "- Added new streaming API for real-time responses\n" - "- Improved error handling with detailed messages\n" - "- Performance boost of 50% in batch processing\n" - "- Fixed memory leak in connection pooling", - response_format=ReleaseBrief, +async def test_integration_agent_hosted_mcp_tool() -> None: + """Integration test for HostedMCPTool with Azure Response Agent using Microsoft Learn MCP.""" + async with temporary_chat_client(agent_name="af-int-test-mcp") as client: + response = await client.get_response( + "How to create an Azure storage account using az cli?", + options={ + # this needs to be high enough to handle the full MCP tool response. 
+ "max_tokens": 5000, + "tools": HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + description="A Microsoft Learn MCP server for documentation questions", + approval_mode="never_require", + ), + }, ) - - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.value is not None - assert isinstance(response.value, ReleaseBrief) - - # Validate structured output fields - brief = response.value - assert len(brief.title) > 0 - assert len(brief.summary) > 0 - assert len(brief.highlights) > 0 + assert isinstance(response, ChatResponse) + assert response.text + # Should contain Azure-related content since it's asking about Azure CLI + assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled -async def test_azure_ai_chat_client_agent_with_runtime_json_schema() -> None: - """Test ChatAgent with runtime JSON schema (structured output) using AzureAIClient.""" - runtime_schema = { - "title": "WeatherDigest", - "type": "object", - "properties": { - "location": {"type": "string"}, - "conditions": {"type": "string"}, - "temperature_c": {"type": "number"}, - "advisory": {"type": "string"}, - }, - "required": ["location", "conditions", "temperature_c", "advisory"], - "additionalProperties": False, - } - - async with ( - temporary_chat_client(agent_name="RuntimeSchemaAgent") as chat_client, - ChatAgent(chat_client=chat_client) as agent, - ): - response = await agent.run( - "Give a brief weather digest for Seattle.", - additional_chat_options={ - "response_format": { - "type": "json_schema", - "json_schema": { - "name": runtime_schema["title"], - "strict": True, - "schema": runtime_schema, - }, - }, +async def test_integration_agent_hosted_code_interpreter_tool(): + """Test Azure Responses Client agent with HostedCodeInterpreterTool through AzureAIClient.""" + async with temporary_chat_client(agent_name="af-int-test-code-interpreter") as client: + response = await client.get_response( + "Calculate the sum of numbers from 1 to 10 using Python code.", + options={ + "tools": [HostedCodeInterpreterTool()], }, ) + # Should contain calculation result (sum of 1-10 = 55) or code execution content + contains_relevant_content = any( + term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] + ) + assert contains_relevant_content or len(response.text.strip()) > 10 - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.text is not None - # Parse JSON and validate structure - import json +@pytest.mark.flaky +@skip_if_azure_ai_integration_tests_disabled +async def test_integration_agent_existing_thread(): + """Test Azure Responses Client agent with existing thread to continue conversations across agent instances.""" + # First conversation - capture the thread + preserved_thread = None - parsed = json.loads(response.text) - assert "location" in parsed - assert "conditions" in parsed - assert "temperature_c" in parsed - assert "advisory" in parsed + async with ( + temporary_chat_client(agent_name="af-int-test-existing-thread") as client, + ChatAgent( + chat_client=client, + instructions="You are a helpful assistant with good memory.", + ) as first_agent, + ): + # Start a conversation and capture the thread + thread = first_agent.get_new_thread() + first_response = await first_agent.run("My hobby is photography. 
Remember this.", thread=thread, store=True) + + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + + # Preserve the thread for reuse + preserved_thread = thread + + # Second conversation - reuse the thread in a new agent instance + if preserved_thread: + async with ( + temporary_chat_client(agent_name="af-int-test-existing-thread-2") as client, + ChatAgent( + chat_client=client, + instructions="You are a helpful assistant with good memory.", + ) as second_agent, + ): + # Reuse the preserved thread + second_response = await second_agent.run("What is my hobby?", thread=preserved_thread) + + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + assert "photography" in second_response.text.lower() diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py index 2cc86c1b65..8be9fb8ded 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py @@ -153,11 +153,11 @@ async def run( for m in entry.messages ] - run_kwargs: dict[str, Any] = {"messages": chat_messages} + run_kwargs: dict[str, Any] = {"messages": chat_messages, "options": {}} if not enable_tool_calls: - run_kwargs["tools"] = None + run_kwargs["options"]["tools"] = None if response_format: - run_kwargs["response_format"] = response_format + run_kwargs["options"]["response_format"] = response_format agent_run_response: AgentRunResponse = await self._invoke_agent( run_kwargs=run_kwargs, diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py index d3b98b05ff..bb8afd29ba 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py @@ -6,7 +6,7 @@ """ import uuid -from collections.abc import AsyncIterator, Callable +from collections.abc import AsyncIterator, Callable, Sequence from typing import TYPE_CHECKING, Any, TypeAlias, cast from agent_framework import ( @@ -193,7 +193,7 @@ def __init__(self, context: AgentOrchestrationContextType, agent_name: str): # a typed AgentRunResponse result. 
def run( # type: ignore[override] self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, response_format: type[BaseModel] | None = None, @@ -282,7 +282,7 @@ def my_orchestration(context): def run_stream( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -327,7 +327,7 @@ def _messages_to_string(self, messages: list[ChatMessage]) -> str: """ return "\n".join([msg.text or "" for msg in messages]) - def _normalize_messages(self, messages: str | ChatMessage | list[str] | list[ChatMessage] | None) -> str: + def _normalize_messages(self, messages: str | ChatMessage | Sequence[str | ChatMessage] | None) -> str: """Convert supported message inputs to a single string.""" if messages is None: return "" diff --git a/python/packages/bedrock/agent_framework_bedrock/__init__.py b/python/packages/bedrock/agent_framework_bedrock/__init__.py index 84f3e5946c..c33badcb35 100644 --- a/python/packages/bedrock/agent_framework_bedrock/__init__.py +++ b/python/packages/bedrock/agent_framework_bedrock/__init__.py @@ -2,7 +2,7 @@ import importlib.metadata -from ._chat_client import BedrockChatClient +from ._chat_client import BedrockChatClient, BedrockChatOptions, BedrockGuardrailConfig, BedrockSettings try: __version__ = importlib.metadata.version(__name__) @@ -11,5 +11,8 @@ __all__ = [ "BedrockChatClient", + "BedrockChatOptions", + "BedrockGuardrailConfig", + "BedrockSettings", "__version__", ] diff --git a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py index c1e404834f..e9e1eeff96 100644 --- a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py +++ b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py @@ -2,9 +2,10 @@ import asyncio import json +import sys from collections import deque from collections.abc import AsyncIterable, MutableMapping, MutableSequence, Sequence -from typing import Any, ClassVar +from typing import Any, ClassVar, Generic, Literal, TypedDict from uuid import uuid4 from agent_framework import ( @@ -28,6 +29,7 @@ prepare_function_call_results, use_chat_middleware, use_function_invocation, + validate_tool_mode, ) from agent_framework._pydantic import AFBaseSettings from agent_framework.exceptions import ServiceInitializationError, ServiceInvalidResponseError @@ -37,11 +39,151 @@ from botocore.config import Config as BotoConfig from pydantic import SecretStr, ValidationError +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + logger = get_logger("agent_framework.bedrock") + +__all__ = [ + "BedrockChatClient", + "BedrockChatOptions", + "BedrockGuardrailConfig", + "BedrockSettings", +] + + +# region Bedrock Chat Options TypedDict + + DEFAULT_REGION = "us-east-1" DEFAULT_MAX_TOKENS = 1024 + +class BedrockGuardrailConfig(TypedDict, total=False): + """Amazon Bedrock Guardrails configuration. 
+ + See: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + """ + + guardrailIdentifier: str + """The identifier of the guardrail to apply.""" + + guardrailVersion: str + """The version of the guardrail to use.""" + + trace: Literal["enabled", "disabled"] + """Whether to include guardrail trace information in the response.""" + + streamProcessingMode: Literal["sync", "async"] + """How to process guardrails during streaming (sync blocks, async does not).""" + + +class BedrockChatOptions(ChatOptions, total=False): + """Amazon Bedrock Converse API-specific chat options dict. + + Extends base ChatOptions with Bedrock-specific parameters. + Bedrock uses a unified Converse API that works across multiple + foundation models (Claude, Titan, Llama, etc.). + + See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html + + Keys: + # Inherited from ChatOptions (mapped to Bedrock): + model_id: The Bedrock model identifier, + translates to ``modelId`` in Bedrock API. + temperature: Sampling temperature, + translates to ``inferenceConfig.temperature``. + top_p: Nucleus sampling parameter, + translates to ``inferenceConfig.topP``. + max_tokens: Maximum number of tokens to generate, + translates to ``inferenceConfig.maxTokens``. + stop: Stop sequences, + translates to ``inferenceConfig.stopSequences``. + tools: List of tools available to the model, + translates to ``toolConfig.tools``. + tool_choice: How the model should use tools, + translates to ``toolConfig.toolChoice``. + + # Options not supported in Bedrock Converse API: + seed: Not supported. + frequency_penalty: Not supported. + presence_penalty: Not supported. + allow_multiple_tool_calls: Not supported (models handle parallel calls automatically). + response_format: Not directly supported (use model-specific prompting). + user: Not supported. + store: Not supported. + logit_bias: Not supported. + metadata: Not supported (use additional_properties for additionalModelRequestFields). + + # Bedrock-specific options: + guardrailConfig: Guardrails configuration for content filtering. + performanceConfig: Performance optimization settings. + requestMetadata: Key-value metadata for the request. + promptVariables: Variables for prompt management (if using managed prompts). + """ + + # Bedrock-specific options + guardrailConfig: BedrockGuardrailConfig + """Guardrails configuration for content filtering and safety.""" + + performanceConfig: dict[str, Any] + """Performance optimization settings (e.g., latency optimization). + See: https://docs.aws.amazon.com/bedrock/latest/userguide/inference-performance.html""" + + requestMetadata: dict[str, str] + """Key-value metadata for the request (max 2048 characters total).""" + + promptVariables: dict[str, dict[str, str]] + """Variables for prompt management when using managed prompts.""" + + # ChatOptions fields not supported in Bedrock + seed: None # type: ignore[misc] + """Not supported in Bedrock Converse API.""" + + frequency_penalty: None # type: ignore[misc] + """Not supported in Bedrock Converse API.""" + + presence_penalty: None # type: ignore[misc] + """Not supported in Bedrock Converse API.""" + + allow_multiple_tool_calls: None # type: ignore[misc] + """Not supported. Bedrock models handle parallel tool calls automatically.""" + + response_format: None # type: ignore[misc] + """Not directly supported. 
Use model-specific prompting for JSON output.""" + + user: None # type: ignore[misc] + """Not supported in Bedrock Converse API.""" + + store: None # type: ignore[misc] + """Not supported in Bedrock Converse API.""" + + logit_bias: None # type: ignore[misc] + """Not supported in Bedrock Converse API.""" + + +BEDROCK_OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "modelId", + "max_tokens": "maxTokens", + "top_p": "topP", + "stop": "stopSequences", +} +"""Maps ChatOptions keys to Bedrock Converse API parameter names.""" + +TBedrockChatOptions = TypeVar("TBedrockChatOptions", bound=TypedDict, default="BedrockChatOptions", covariant=True) # type: ignore[valid-type] + + +# endregion + + ROLE_MAP: dict[Role, str] = { Role.USER: "user", Role.ASSISTANT: "assistant", @@ -74,7 +216,7 @@ class BedrockSettings(AFBaseSettings): @use_function_invocation @use_instrumentation @use_chat_middleware -class BedrockChatClient(BaseChatClient): +class BedrockChatClient(BaseChatClient[TBedrockChatOptions], Generic[TBedrockChatOptions]): """Async chat client for Amazon Bedrock's Converse API.""" OTEL_PROVIDER_NAME: ClassVar[str] = "aws.bedrock" # type: ignore[reportIncompatibleVariableOverride, misc] @@ -106,6 +248,26 @@ def __init__( env_file_path: Optional .env file path used by ``BedrockSettings`` to load defaults. env_file_encoding: Encoding for the optional .env file. kwargs: Additional arguments forwarded to ``BaseChatClient``. + + Examples: + .. code-block:: python + + from agent_framework.bedrock import BedrockChatClient + + # Basic usage with default credentials + client = BedrockChatClient(model_id="") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework_bedrock import BedrockChatOptions + + + class MyOptions(BedrockChatOptions, total=False): + my_custom_option: str + + + client = BedrockChatClient[MyOptions](model_id="") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: settings = BedrockSettings( @@ -143,25 +305,27 @@ def _create_session(settings: BedrockSettings) -> Boto3Session: session_kwargs["aws_session_token"] = settings.session_token.get_secret_value() return Boto3Session(**session_kwargs) + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: - request = self._build_converse_request(messages, chat_options, **kwargs) + request = self._prepare_options(messages, options, **kwargs) raw_response = await asyncio.to_thread(self._bedrock_client.converse, **request) return self._process_converse_response(raw_response) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - response = await self._inner_get_response(messages=messages, chat_options=chat_options, **kwargs) + response = await self._inner_get_response(messages=messages, options=options, **kwargs) contents = list(response.messages[0].contents if response.messages else []) if response.usage_details: contents.append(UsageContent(details=response.usage_details)) @@ -173,13 +337,13 @@ async def _inner_get_streaming_response( raw_representation=response.raw_representation, ) - def _build_converse_request( + def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> dict[str, Any]: - 
model_id = chat_options.model_id or self.model_id + model_id = options.get("model_id") or self.model_id if not model_id: raise ServiceInitializationError( "Bedrock model_id is required. Set via chat options or BEDROCK_CHAT_MODEL_ID environment variable." @@ -188,40 +352,42 @@ def _build_converse_request( system_prompts, conversation = self._prepare_bedrock_messages(messages) if not conversation: raise ServiceInitializationError("At least one non-system message is required for Bedrock requests.") + # Prepend instructions from options if they exist + if instructions := options.get("instructions"): + system_prompts = [{"text": instructions}, *system_prompts] - payload: dict[str, Any] = { + run_options: dict[str, Any] = { "modelId": model_id, "messages": conversation, + "inferenceConfig": {"maxTokens": options.get("max_tokens", DEFAULT_MAX_TOKENS)}, } if system_prompts: - payload["system"] = system_prompts - - inference_config: dict[str, Any] = {} - inference_config["maxTokens"] = ( - chat_options.max_tokens if chat_options.max_tokens is not None else DEFAULT_MAX_TOKENS - ) - if chat_options.temperature is not None: - inference_config["temperature"] = chat_options.temperature - if chat_options.top_p is not None: - inference_config["topP"] = chat_options.top_p - if chat_options.stop is not None: - inference_config["stopSequences"] = chat_options.stop - if inference_config: - payload["inferenceConfig"] = inference_config - - tool_config = self._convert_tools_to_bedrock_config(chat_options.tools) - if tool_choice := self._convert_tool_choice(chat_options.tool_choice): - if tool_config is None: - tool_config = {} - tool_config["toolChoice"] = tool_choice + run_options["system"] = system_prompts + + if (temperature := options.get("temperature")) is not None: + run_options["inferenceConfig"]["temperature"] = temperature + if (top_p := options.get("top_p")) is not None: + run_options["inferenceConfig"]["topP"] = top_p + if (stop := options.get("stop")) is not None: + run_options["inferenceConfig"]["stopSequences"] = stop + + tool_config = self._prepare_tools(options.get("tools")) + if tool_mode := validate_tool_mode(options.get("tool_choice")): + tool_config = tool_config or {} + match tool_mode.get("mode"): + case "auto" | "none": + tool_config["toolChoice"] = {tool_mode.get("mode"): {}} + case "required": + if required_name := tool_mode.get("required_function_name"): + tool_config["toolChoice"] = {"tool": {"name": required_name}} + else: + tool_config["toolChoice"] = {"any": {}} + case _: + raise ServiceInitializationError(f"Unsupported tool mode for Bedrock: {tool_mode.get('mode')}") if tool_config: - payload["toolConfig"] = tool_config + run_options["toolConfig"] = tool_config - if chat_options.additional_properties: - payload.update(chat_options.additional_properties) - if kwargs: - payload.update(kwargs) - return payload + return run_options def _prepare_bedrock_messages( self, messages: Sequence[ChatMessage] @@ -374,12 +540,10 @@ def _normalize_tool_result_value(self, value: Any) -> dict[str, Any]: return {"text": str(value)} return {"text": str(value)} - def _convert_tools_to_bedrock_config( - self, tools: list[ToolProtocol | MutableMapping[str, Any]] | None - ) -> dict[str, Any] | None: + def _prepare_tools(self, tools: list[ToolProtocol | MutableMapping[str, Any]] | None) -> dict[str, Any] | None: + converted: list[dict[str, Any]] = [] if not tools: return None - converted: list[dict[str, Any]] = [] for tool in tools: if isinstance(tool, MutableMapping): converted.append(dict(tool)) @@ 
-396,24 +560,6 @@ def _convert_tools_to_bedrock_config( logger.debug("Ignoring unsupported tool type for Bedrock: %s", type(tool)) return {"tools": converted} if converted else None - def _convert_tool_choice(self, tool_choice: Any) -> dict[str, Any] | None: - if not tool_choice: - return None - mode = tool_choice.mode if hasattr(tool_choice, "mode") else str(tool_choice) - required_name = getattr(tool_choice, "required_function_name", None) - match mode: - case "auto": - return {"auto": {}} - case "none": - return {"none": {}} - case "required": - if required_name: - return {"tool": {"name": required_name}} - return {"any": {}} - case _: - logger.debug("Unsupported tool choice mode for Bedrock: %s", mode) - return None - @staticmethod def _generate_tool_call_id() -> str: return f"tool-call-{uuid4().hex}" diff --git a/python/packages/bedrock/samples/bedrock_sample.py b/python/packages/bedrock/samples/bedrock_sample.py index 9e14b5a385..4b5ca8a9a1 100644 --- a/python/packages/bedrock/samples/bedrock_sample.py +++ b/python/packages/bedrock/samples/bedrock_sample.py @@ -11,7 +11,6 @@ FunctionResultContent, Role, TextContent, - ToolMode, ai_function, ) @@ -31,7 +30,7 @@ async def main() -> None: chat_client=BedrockChatClient(), instructions="You are a concise travel assistant.", name="BedrockWeatherAgent", - tool_choice=ToolMode.AUTO, + tool_choice="auto", tools=[get_weather], ) diff --git a/python/packages/bedrock/tests/test_bedrock_client.py b/python/packages/bedrock/tests/test_bedrock_client.py index 4086dfa429..5842426483 100644 --- a/python/packages/bedrock/tests/test_bedrock_client.py +++ b/python/packages/bedrock/tests/test_bedrock_client.py @@ -6,7 +6,7 @@ from typing import Any import pytest -from agent_framework import ChatMessage, ChatOptions, Role, TextContent +from agent_framework import ChatMessage, Role, TextContent from agent_framework.exceptions import ServiceInitializationError from agent_framework_bedrock import BedrockChatClient @@ -46,7 +46,7 @@ def test_get_response_invokes_bedrock_runtime() -> None: ChatMessage(role=Role.USER, contents=[TextContent(text="hello")]), ] - response = asyncio.run(client.get_response(messages=messages, chat_options=ChatOptions(max_tokens=32))) + response = asyncio.run(client.get_response(messages=messages, options={"max_tokens": 32})) assert stub.calls, "Expected the runtime client to be called" payload = stub.calls[0] @@ -66,4 +66,4 @@ def test_build_request_requires_non_system_messages() -> None: messages = [ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="Only system text")])] with pytest.raises(ServiceInitializationError): - client._build_converse_request(messages, ChatOptions()) + client._prepare_options(messages, {}) diff --git a/python/packages/bedrock/tests/test_bedrock_settings.py b/python/packages/bedrock/tests/test_bedrock_settings.py index a3b0894d28..1924c750c6 100644 --- a/python/packages/bedrock/tests/test_bedrock_settings.py +++ b/python/packages/bedrock/tests/test_bedrock_settings.py @@ -13,7 +13,6 @@ FunctionResultContent, Role, TextContent, - ToolMode, ) from pydantic import BaseModel @@ -46,10 +45,13 @@ def test_build_request_includes_tool_config() -> None: client = _build_client() tool = AIFunction(name="get_weather", description="desc", func=_dummy_weather, input_model=_WeatherArgs) - options = ChatOptions(tools=[tool], tool_choice=ToolMode.REQUIRED("get_weather")) + options = { + "tools": [tool], + "tool_choice": {"mode": "required", "required_function_name": "get_weather"}, + } messages = 
[ChatMessage(role=Role.USER, contents=[TextContent(text="hi")])] - request = client._build_converse_request(messages, options) + request = client._prepare_options(messages, options) assert request["toolConfig"]["tools"][0]["toolSpec"]["name"] == "get_weather" assert request["toolConfig"]["toolChoice"] == {"tool": {"name": "get_weather"}} @@ -57,7 +59,7 @@ def test_build_request_includes_tool_config() -> None: def test_build_request_serializes_tool_history() -> None: client = _build_client() - options = ChatOptions() + options: ChatOptions = {} messages = [ ChatMessage(role=Role.USER, contents=[TextContent(text="how's weather?")]), ChatMessage( @@ -70,7 +72,7 @@ def test_build_request_serializes_tool_history() -> None: ), ] - request = client._build_converse_request(messages, options) + request = client._prepare_options(messages, options) assistant_block = request["messages"][1]["content"][0]["toolUse"] result_block = request["messages"][2]["content"][0]["toolResult"] diff --git a/python/packages/chatkit/agent_framework_chatkit/_converter.py b/python/packages/chatkit/agent_framework_chatkit/_converter.py index 1070d83926..252ac8a753 100644 --- a/python/packages/chatkit/agent_framework_chatkit/_converter.py +++ b/python/packages/chatkit/agent_framework_chatkit/_converter.py @@ -6,11 +6,6 @@ import sys from collections.abc import Awaitable, Callable, Sequence -if sys.version_info >= (3, 11): - from typing import assert_never -else: - from typing_extensions import assert_never - from agent_framework import ( ChatMessage, DataContent, @@ -38,6 +33,11 @@ WorkflowItem, ) +if sys.version_info >= (3, 11): + from typing import assert_never +else: + from typing_extensions import assert_never + logger = logging.getLogger(__name__) diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 78164ad2a0..ddd7c06b4b 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -7,7 +7,16 @@ from contextlib import AbstractAsyncContextManager, AsyncExitStack from copy import deepcopy from itertools import chain -from typing import Any, ClassVar, Literal, Protocol, TypeVar, cast, runtime_checkable +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Generic, + Protocol, + TypedDict, + cast, + runtime_checkable, +) from uuid import uuid4 from mcp import types @@ -27,27 +36,73 @@ AgentRunResponse, AgentRunResponseUpdate, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, Role, - ToolMode, ) from .exceptions import AgentExecutionException, AgentInitializationError from .observability import use_agent_instrumentation +if TYPE_CHECKING: + from ._types import ChatOptions + + +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore[import] # pragma: no cover + if sys.version_info >= (3, 11): from typing import Self # pragma: no cover else: from typing_extensions import Self # pragma: no cover + logger = get_logger("agent_framework") TThreadType = TypeVar("TThreadType", bound="AgentThread") +TOptions_co = TypeVar( + "TOptions_co", + bound=TypedDict, # type: ignore[valid-type] + default="ChatOptions", + covariant=True, +) + + +def _merge_options(base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]: + """Merge two options dicts, with override values taking precedence. 
+ + Args: + base: The base options dict. + override: The override options dict (values take precedence). + + Returns: + A new merged options dict. + """ + result = dict(base) + for key, value in override.items(): + if value is None: + continue + if key == "tools" and result.get("tools"): + # Combine tool lists + result["tools"] = list(result["tools"]) + list(value) + elif key == "logit_bias" and result.get("logit_bias"): + # Merge logit_bias dicts + result["logit_bias"] = {**result["logit_bias"], **value} + elif key == "metadata" and result.get("metadata"): + # Merge metadata dicts + result["metadata"] = {**result["metadata"], **value} + elif key == "instructions" and result.get("instructions"): + # Concatenate instructions + result["instructions"] = f"{result['instructions']}\n{value}" + else: + result[key] = value + return result def _sanitize_agent_name(agent_name: str | None) -> str | None: @@ -151,7 +206,7 @@ def get_new_thread(self, **kwargs): async def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -182,7 +237,7 @@ async def run( def run_stream( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -445,7 +500,7 @@ async def agent_wrapper(**kwargs: Any) -> str: def _normalize_messages( self, - messages: str | ChatMessage | Sequence[str] | Sequence[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, ) -> list[ChatMessage]: if messages is None: return [] @@ -464,20 +519,23 @@ def _normalize_messages( @use_agent_middleware @use_agent_instrumentation(capture_usage=False) # type: ignore[arg-type,misc] -class ChatAgent(BaseAgent): # type: ignore[misc] +class ChatAgent(BaseAgent, Generic[TOptions_co]): # type: ignore[misc] """A Chat Client Agent. This is the primary agent implementation that uses a chat client to interact with language models. It supports tools, context providers, middleware, and both streaming and non-streaming responses. + The generic type parameter TOptions specifies which options TypedDict this agent + accepts. This enables IDE autocomplete and type checking for provider-specific options. + Examples: Basic usage: .. code-block:: python from agent_framework import ChatAgent - from agent_framework.clients import OpenAIChatClient + from agent_framework.openai import OpenAIChatClient # Create a basic chat agent client = OpenAIChatClient(model_id="gpt-4") @@ -509,72 +567,55 @@ def get_weather(location: str) -> str: async for update in agent.run_stream("What's the weather in Paris?"): print(update.text, end="") - With additional provider specific options: + With typed options for IDE autocomplete: .. code-block:: python - agent = ChatAgent( + from agent_framework import ChatAgent + from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions + + client = OpenAIChatClient(model_id="gpt-4o") + agent: ChatAgent[OpenAIChatOptions] = ChatAgent( chat_client=client, name="reasoning-agent", instructions="You are a reasoning assistant.", - model_id="gpt-5", - temperature=0.7, - max_tokens=500, - additional_chat_options={ - "reasoning": {"effort": "high", "summary": "concise"} - }, # OpenAI Responses specific. 
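The merge semantics implemented by ``_merge_options`` above can be checked with a short, illustrative example (the tool names and option values are made up):

```python
# Illustrative values only; exercises the override/merge rules of _merge_options above.
base = {"temperature": 0.2, "tools": ["tool_a"], "instructions": "Be brief."}
override = {"temperature": 0.7, "tools": ["tool_b"], "instructions": "Answer in French."}

merged = _merge_options(base, override)
assert merged["temperature"] == 0.7  # plain keys: the override value wins
assert merged["tools"] == ["tool_a", "tool_b"]  # tool lists are concatenated
assert merged["instructions"] == "Be brief.\nAnswer in French."  # instructions are joined with a newline
```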
+ options={ + "temperature": 0.7, + "max_tokens": 500, + "reasoning_effort": "high", # OpenAI-specific, IDE will autocomplete! + }, ) - # Use streaming responses - async for update in agent.run_stream("How do you prove the pythagorean theorem?"): - print(update.text, end="") + # Or pass options at runtime + response = await agent.run( + "What is 25 * 47?", + options={"temperature": 0.0, "logprobs": True}, + ) """ AGENT_PROVIDER_NAME: ClassVar[str] = "microsoft.agent_framework" def __init__( self, - chat_client: ChatClientProtocol, + chat_client: ChatClientProtocol[TOptions_co], instructions: str | None = None, *, id: str | None = None, name: str | None = None, description: str | None = None, - chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, - context_provider: ContextProvider | None = None, - middleware: Sequence[Middleware] | None = None, - # chat options - allow_multiple_tool_calls: bool | None = None, - conversation_id: str | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] | None = None, - top_p: float | None = None, - user: str | None = None, - additional_chat_options: dict[str, Any] | None = None, + default_options: TOptions_co | None = None, + chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, + context_provider: ContextProvider | None = None, + middleware: Sequence[Middleware] | None = None, **kwargs: Any, ) -> None: """Initialize a ChatAgent instance. - Note: - The set of parameters from frequency_penalty to request_kwargs are used to - call the chat client. They can also be passed to both run methods. - When both are set, the ones passed to the run methods take precedence. - Args: chat_client: The chat client to use for the agent. instructions: Optional instructions for the agent. @@ -586,35 +627,24 @@ def __init__( description: A brief description of the agent's purpose. chat_message_store_factory: Factory function to create an instance of ChatMessageStoreProtocol. If not provided, the default in-memory store will be used. - context_provider: The context provider to include during agent invocation. - middleware: List of middleware to intercept agent, chat and function invocations. - allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. - conversation_id: The conversation ID for service-managed threads. - Cannot be used together with chat_message_store_factory. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. - This overrides the model_id set in the chat client if it contains one. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. 
- store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. + context_provider: The context providers to include during agent invocation. + middleware: List of middleware to intercept agent and function invocations. + default_options: A TypedDict containing chat options. When using a typed agent like + ``ChatAgent[OpenAIChatOptions]``, this enables IDE autocomplete for + provider-specific options including temperature, max_tokens, model_id, + tool_choice, and provider-specific options like reasoning_effort. + You can also create your own TypedDict for custom chat clients. + These can be overridden at runtime via the ``options`` parameter of ``run()`` and ``run_stream()``. tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_chat_options: A dictionary of other values that will be passed through - to the chat_client ``get_response`` and ``get_streaming_response`` methods. - This can be used to pass provider specific parameters. kwargs: Any additional keyword arguments. Will be stored as ``additional_properties``. Raises: AgentInitializationError: If both conversation_id and chat_message_store_factory are provided. """ + # Extract conversation_id from options for validation + opts = dict(default_options) if default_options else {} + conversation_id = opts.get("conversation_id") + if conversation_id is not None and chat_message_store_factory is not None: raise AgentInitializationError( "Cannot specify both conversation_id and chat_message_store_factory. " @@ -634,37 +664,47 @@ def __init__( middleware=middleware, **kwargs, ) - self.chat_client = chat_client + self.chat_client: ChatClientProtocol[TOptions_co] = chat_client self.chat_message_store_factory = chat_message_store_factory + # Get tools from options or named parameter (named param takes precedence) + tools_ = tools if tools is not None else opts.pop("tools", None) + + # Handle instructions - named parameter takes precedence over options + instructions_ = instructions if instructions is not None else opts.pop("instructions", None) + # We ignore the MCP Servers here and store them separately, # we add their functions to the tools list at runtime normalized_tools: list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] = ( # type:ignore[reportUnknownVariableType] - [] if tools is None else tools if isinstance(tools, list) else [tools] # type: ignore[list-item] + [] if tools_ is None else tools_ if isinstance(tools_, list) else [tools_] # type: ignore[list-item] ) self._local_mcp_tools = [tool for tool in normalized_tools if isinstance(tool, MCPTool)] agent_tools = [tool for tool in normalized_tools if not isinstance(tool, MCPTool)] - self.chat_options = ChatOptions( - model_id=model_id or (str(chat_client.model_id) if hasattr(chat_client, "model_id") else None), - allow_multiple_tool_calls=allow_multiple_tool_calls, - conversation_id=conversation_id, - frequency_penalty=frequency_penalty, - instructions=instructions, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=agent_tools, - top_p=top_p, - user=user, - additional_properties=additional_chat_options or {}, # type: ignore - ) + + # Build chat options dict + self.default_options: dict[str, Any] = { + 
"model_id": opts.pop("model_id", None) or (getattr(self.chat_client, "model_id", None)), + "allow_multiple_tool_calls": opts.pop("allow_multiple_tool_calls", None), + "conversation_id": conversation_id, + "frequency_penalty": opts.pop("frequency_penalty", None), + "instructions": instructions_, + "logit_bias": opts.pop("logit_bias", None), + "max_tokens": opts.pop("max_tokens", None), + "metadata": opts.pop("metadata", None), + "presence_penalty": opts.pop("presence_penalty", None), + "response_format": opts.pop("response_format", None), + "seed": opts.pop("seed", None), + "stop": opts.pop("stop", None), + "store": opts.pop("store", None), + "temperature": opts.pop("temperature", None), + "tool_choice": opts.pop("tool_choice", "auto"), + "tools": agent_tools, + "top_p": opts.pop("top_p", None), + "user": opts.pop("user", None), + **opts, # Remaining options are provider-specific + } + # Remove None values from chat_options + self.default_options = {k: v for k, v in self.default_options.items() if v is not None} self._async_exit_stack = AsyncExitStack() self._update_agent_name_and_description() @@ -716,30 +756,15 @@ def _update_agent_name_and_description(self) -> None: async def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, *, thread: AgentThread | None = None, - allow_multiple_tool_calls: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] | None = None, - top_p: float | None = None, - user: str | None = None, - additional_chat_options: dict[str, Any] | None = None, + options: TOptions_co | None = None, **kwargs: Any, ) -> AgentRunResponse: """Run the agent with the given messages and options. @@ -755,36 +780,29 @@ async def run( Keyword Args: thread: The thread to use for the agent. - allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_chat_options: Additional properties to include in the request. - Use this field for provider-specific parameters. + tools: The tools to use for this specific run (merged with default tools). + options: A TypedDict containing chat options. 
When using a typed agent like + ``ChatAgent[OpenAIChatOptions]``, this enables IDE autocomplete for + provider-specific options including temperature, max_tokens, model_id, + tool_choice, and provider-specific options like reasoning_effort. kwargs: Additional keyword arguments for the agent. Will only be passed to functions that are called. Returns: An AgentRunResponse containing the agent's response. """ + # Build options dict from provided options + opts = dict(options) if options else {} + + # Get tools from options or named parameter (named param takes precedence) + tools_ = tools if tools is not None else opts.pop("tools", None) + input_messages = self._normalize_messages(messages) thread, run_chat_options, thread_messages = await self._prepare_thread_and_messages( thread=thread, input_messages=input_messages, **kwargs ) normalized_tools: list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] = ( # type:ignore[reportUnknownVariableType] - [] if tools is None else tools if isinstance(tools, list) else [tools] + [] if tools_ is None else tools_ if isinstance(tools_, list) else [tools_] ) agent_name = self._get_agent_name() @@ -804,27 +822,30 @@ async def run( await self._async_exit_stack.enter_async_context(mcp_server) final_tools.extend(mcp_server.functions) - merged_additional_options = additional_chat_options or {} - co = run_chat_options & ChatOptions( - model_id=model_id, - conversation_id=thread.service_thread_id, - allow_multiple_tool_calls=allow_multiple_tool_calls, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=final_tools, - top_p=top_p, - user=user, - additional_properties=merged_additional_options, # type: ignore[arg-type] - ) + # Build options dict from run() options merged with provided options + run_opts: dict[str, Any] = { + "model_id": opts.pop("model_id", None), + "conversation_id": thread.service_thread_id, + "allow_multiple_tool_calls": opts.pop("allow_multiple_tool_calls", None), + "frequency_penalty": opts.pop("frequency_penalty", None), + "logit_bias": opts.pop("logit_bias", None), + "max_tokens": opts.pop("max_tokens", None), + "metadata": opts.pop("metadata", None), + "presence_penalty": opts.pop("presence_penalty", None), + "response_format": opts.pop("response_format", None), + "seed": opts.pop("seed", None), + "stop": opts.pop("stop", None), + "store": opts.pop("store", None), + "temperature": opts.pop("temperature", None), + "tool_choice": opts.pop("tool_choice", None), + "tools": final_tools, + "top_p": opts.pop("top_p", None), + "user": opts.pop("user", None), + **opts, # Remaining options are provider-specific + } + # Remove None values and merge with chat_options + run_opts = {k: v for k, v in run_opts.items() if v is not None} + co = _merge_options(run_chat_options, run_opts) # Ensure thread is forwarded in kwargs for tool invocation kwargs["thread"] = thread @@ -832,7 +853,7 @@ async def run( filtered_kwargs = {k: v for k, v in kwargs.items() if k != "chat_options"} response = await self.chat_client.get_response( messages=thread_messages, - chat_options=co, + options=co, # type: ignore[arg-type] **filtered_kwargs, ) @@ -863,30 +884,15 @@ async def run( async def run_stream( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | ChatMessage | Sequence[str | 
ChatMessage] | None = None, *, thread: AgentThread | None = None, - allow_multiple_tool_calls: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] | None = None, - top_p: float | None = None, - user: str | None = None, - additional_chat_options: dict[str, Any] | None = None, + options: TOptions_co | None = None, **kwargs: Any, ) -> AsyncIterable[AgentRunResponseUpdate]: """Stream the agent with the given messages and options. @@ -902,30 +908,23 @@ async def run_stream( Keyword Args: thread: The thread to use for the agent. - allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_chat_options: Additional properties to include in the request. - Use this field for provider-specific parameters. - kwargs: Any additional keyword arguments. + tools: The tools to use for this specific run (merged with agent-level tools). + options: A TypedDict containing chat options. When using a typed agent like + ``ChatAgent[OpenAIChatOptions]``, this enables IDE autocomplete for + provider-specific options including temperature, max_tokens, model_id, + tool_choice, and provider-specific options like reasoning_effort. + kwargs: Additional keyword arguments for the agent. Will only be passed to functions that are called. Yields: AgentRunResponseUpdate objects containing chunks of the agent's response. 
""" + # Build options dict from provided options + opts = dict(options) if options else {} + + # Get tools from options or named parameter (named param takes precedence) + tools_ = tools if tools is not None else opts.pop("tools", None) + input_messages = self._normalize_messages(messages) thread, run_chat_options, thread_messages = await self._prepare_thread_and_messages( thread=thread, input_messages=input_messages, **kwargs @@ -934,7 +933,7 @@ async def run_stream( # Resolve final tool list (runtime provided tools + local MCP server tools) final_tools: list[ToolProtocol | MutableMapping[str, Any] | Callable[..., Any]] = [] normalized_tools: list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] = ( # type: ignore[reportUnknownVariableType] - [] if tools is None else tools if isinstance(tools, list) else [tools] + [] if tools_ is None else tools_ if isinstance(tools_, list) else [tools_] ) # Normalize tools argument to a list without mutating the original parameter for tool in normalized_tools: @@ -950,27 +949,30 @@ async def run_stream( await self._async_exit_stack.enter_async_context(mcp_server) final_tools.extend(mcp_server.functions) - merged_additional_options = additional_chat_options or {} - co = run_chat_options & ChatOptions( - conversation_id=thread.service_thread_id, - allow_multiple_tool_calls=allow_multiple_tool_calls, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - model_id=model_id, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=final_tools, - top_p=top_p, - user=user, - additional_properties=merged_additional_options, # type: ignore[arg-type] - ) + # Build options dict from run_stream() options merged with provided options + run_opts: dict[str, Any] = { + "model_id": opts.pop("model_id", None), + "conversation_id": thread.service_thread_id, + "allow_multiple_tool_calls": opts.pop("allow_multiple_tool_calls", None), + "frequency_penalty": opts.pop("frequency_penalty", None), + "logit_bias": opts.pop("logit_bias", None), + "max_tokens": opts.pop("max_tokens", None), + "metadata": opts.pop("metadata", None), + "presence_penalty": opts.pop("presence_penalty", None), + "response_format": opts.pop("response_format", None), + "seed": opts.pop("seed", None), + "stop": opts.pop("stop", None), + "store": opts.pop("store", None), + "temperature": opts.pop("temperature", None), + "tool_choice": opts.pop("tool_choice", None), + "tools": final_tools, + "top_p": opts.pop("top_p", None), + "user": opts.pop("user", None), + **opts, # Remaining options are provider-specific + } + # Remove None values and merge with chat_options + run_opts = {k: v for k, v in run_opts.items() if v is not None} + co = _merge_options(run_chat_options, run_opts) # Ensure thread is forwarded in kwargs for tool invocation kwargs["thread"] = thread @@ -979,7 +981,7 @@ async def run_stream( response_updates: list[ChatResponseUpdate] = [] async for update in self.chat_client.get_streaming_response( messages=thread_messages, - chat_options=co, + options=co, # type: ignore[arg-type] **filtered_kwargs, ): response_updates.append(update) @@ -998,7 +1000,9 @@ async def run_stream( raw_representation=update, ) - response = ChatResponse.from_chat_response_updates(response_updates, output_format_type=co.response_format) + response = ChatResponse.from_chat_response_updates( + response_updates, 
output_format_type=co.get("response_format") + ) await self._update_thread_with_type_and_conversation_id(thread, response.conversation_id) await self._notify_thread_of_new_messages( @@ -1043,9 +1047,9 @@ def get_new_thread( service_thread_id=service_thread_id, context_provider=self.context_provider, ) - if self.chat_options.conversation_id is not None: + if self.default_options.get("conversation_id") is not None: return AgentThread( - service_thread_id=self.chat_options.conversation_id, + service_thread_id=self.default_options["conversation_id"], context_provider=self.context_provider, ) if self.chat_message_store_factory is not None: @@ -1202,7 +1206,7 @@ async def _prepare_thread_and_messages( thread: AgentThread | None, input_messages: list[ChatMessage] | None = None, **kwargs: Any, - ) -> tuple[AgentThread, ChatOptions, list[ChatMessage]]: + ) -> tuple[AgentThread, dict[str, Any], list[ChatMessage]]: """Prepare the thread and messages for agent execution. This method prepares the conversation thread, merges context provider data, @@ -1222,7 +1226,7 @@ async def _prepare_thread_and_messages( Raises: AgentExecutionException: If the conversation IDs on the thread and agent don't match. """ - chat_options = deepcopy(self.chat_options) if self.chat_options else ChatOptions() + chat_options = deepcopy(self.default_options) if self.default_options else {} thread = thread or self.get_new_thread() if thread.service_thread_id and thread.context_provider: await thread.context_provider.thread_created(thread.service_thread_id) @@ -1239,21 +1243,21 @@ async def _prepare_thread_and_messages( if context.messages: thread_messages.extend(context.messages) if context.tools: - if chat_options.tools is not None: - chat_options.tools.extend(context.tools) + if chat_options.get("tools") is not None: + chat_options["tools"].extend(context.tools) else: - chat_options.tools = list(context.tools) + chat_options["tools"] = list(context.tools) if context.instructions: - chat_options.instructions = ( + chat_options["instructions"] = ( context.instructions - if not chat_options.instructions - else f"{chat_options.instructions}\n{context.instructions}" + if not chat_options.get("instructions") + else f"{chat_options['instructions']}\n{context.instructions}" ) thread_messages.extend(input_messages or []) if ( thread.service_thread_id - and chat_options.conversation_id - and thread.service_thread_id != chat_options.conversation_id + and chat_options.get("conversation_id") + and thread.service_thread_id != chat_options["conversation_id"] ): raise AgentExecutionException( "The conversation_id set on the agent is different from the one set on the thread, " diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index ca273ea536..f48e8af86a 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -1,11 +1,24 @@ # Copyright (c) Microsoft. All rights reserved. 
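Taken together, the ``ChatAgent`` changes above mean that construction-time ``default_options`` are merged with per-run ``options`` through ``_merge_options``, with the per-run values taking precedence. A hedged usage sketch, where the model id and option values are illustrative:

```python
from agent_framework import ChatAgent
from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions

agent: ChatAgent[OpenAIChatOptions] = ChatAgent(
    chat_client=OpenAIChatClient(model_id="gpt-4o"),
    instructions="You are a concise assistant.",
    default_options={"temperature": 0.7, "max_tokens": 300},
)

# Per-run options are layered over default_options, so this call runs with
# temperature=0.0 while keeping max_tokens=300 from the agent's defaults.
response = await agent.run("What is 25 * 47?", options={"temperature": 0.0})
```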
import asyncio +import sys from abc import ABC, abstractmethod -from collections.abc import AsyncIterable, Callable, MutableMapping, MutableSequence, Sequence -from typing import TYPE_CHECKING, Any, ClassVar, Literal, Protocol, TypeVar, runtime_checkable - -from pydantic import BaseModel +from collections.abc import ( + AsyncIterable, + Callable, + MutableMapping, + MutableSequence, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Generic, + Protocol, + TypedDict, + runtime_checkable, +) from ._logging import get_logger from ._memory import ContextProvider @@ -18,11 +31,27 @@ ) from ._serialization import SerializationMixin from ._threads import ChatMessageStoreProtocol -from ._tools import FUNCTION_INVOKING_CHAT_CLIENT_MARKER, FunctionInvocationConfiguration, ToolProtocol -from ._types import ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, ToolMode, prepare_messages +from ._tools import ( + FUNCTION_INVOKING_CHAT_CLIENT_MARKER, + FunctionInvocationConfiguration, + ToolProtocol, +) +from ._types import ( + ChatMessage, + ChatResponse, + ChatResponseUpdate, + prepare_messages, + validate_chat_options, +) + +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar if TYPE_CHECKING: from ._agents import ChatAgent + from ._types import ChatOptions TInput = TypeVar("TInput", contravariant=True) @@ -39,14 +68,26 @@ # region ChatClientProtocol Protocol +# Contravariant for the Protocol +TOptions_contra = TypeVar( + "TOptions_contra", + bound=TypedDict, # type: ignore[valid-type] + default="ChatOptions", + contravariant=True, +) + @runtime_checkable -class ChatClientProtocol(Protocol): +class ChatClientProtocol(Protocol[TOptions_contra]): # """A protocol for a chat client that can generate responses. This protocol defines the interface that all chat clients must implement, including methods for generating both streaming and non-streaming responses. + The generic type parameter TOptions specifies which options TypedDict this + client accepts, enabling IDE autocomplete and type checking for provider-specific + options. + Note: Protocols use structural subtyping (duck typing). Classes don't need to explicitly inherit from this protocol to be considered compatible. @@ -59,10 +100,6 @@ class ChatClientProtocol(Protocol): # Any class implementing the required methods is compatible class CustomChatClient: - @property - def additional_properties(self) -> dict[str, Any]: - return {} - async def get_response(self, messages, **kwargs): # Your custom implementation return ChatResponse(messages=[], response_id="custom") @@ -81,61 +118,21 @@ async def _stream(): assert isinstance(client, ChatClientProtocol) """ - @property - def additional_properties(self) -> dict[str, Any]: - """Get additional properties associated with the client.""" - ... 
+ additional_properties: dict[str, Any] async def get_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | ChatMessage | Sequence[str | ChatMessage], *, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, - tools: ToolProtocol - | Callable[..., Any] - | MutableMapping[str, Any] - | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None = None, - top_p: float | None = None, - user: str | None = None, - additional_properties: dict[str, Any] | None = None, + options: TOptions_contra | None = None, **kwargs: Any, ) -> ChatResponse: """Send input and return the response. Args: messages: The sequence of input messages to send. - - Keyword Args: - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_properties: Additional properties to include in the request. - kwargs: Any additional keyword arguments. - Will only be passed to functions that are called. + options: Chat options as a TypedDict. + **kwargs: Additional chat options. Returns: The response messages generated by the client. @@ -147,155 +144,48 @@ async def get_response( def get_streaming_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | ChatMessage | Sequence[str | ChatMessage], *, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, - tools: ToolProtocol - | Callable[..., Any] - | MutableMapping[str, Any] - | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None = None, - top_p: float | None = None, - user: str | None = None, - additional_properties: dict[str, Any] | None = None, + options: TOptions_contra | None = None, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: """Send input messages and stream the response. Args: messages: The sequence of input messages to send. - - Keyword Args: - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. 
- max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_properties: Additional properties to include in the request. - kwargs: Any additional keyword arguments. - Will only be passed to functions that are called. + options: Chat options as a TypedDict. + **kwargs: Additional chat options. Yields: - ChatResponseUpdate: An async iterable of chat response updates containing - the content of the response messages generated by the client. - - Raises: - ValueError: If the input message sequence is ``None``. + ChatResponseUpdate: Partial response updates as they're generated. """ ... +# endregion + + # region ChatClientBase +# Covariant for the BaseChatClient +TOptions_co = TypeVar( + "TOptions_co", + bound=TypedDict, # type: ignore[valid-type] + default="ChatOptions", + covariant=True, +) -def _merge_chat_options( - *, - base_chat_options: ChatOptions | Any | None, - model_id: str | None = None, - allow_multiple_tool_calls: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, - tools: list[ToolProtocol | dict[str, Any] | Callable[..., Any]] | None = None, - top_p: float | None = None, - user: str | None = None, - additional_properties: dict[str, Any] | None = None, -) -> ChatOptions: - """Merge base chat options with direct parameters to create a new ChatOptions instance. - - When both base_chat_options and individual parameters are provided, the individual - parameters take precedence and override the corresponding values in base_chat_options. - Tools from both sources are combined into a single list. - - Keyword Args: - base_chat_options: Optional base ChatOptions to merge with direct parameters. - model_id: The model_id to use for the agent. - allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - tools: The normalized tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_properties: Additional properties to include in the request. 
- - Returns: - A new ChatOptions instance with merged values. - - Raises: - TypeError: If base_chat_options is not None and not an instance of ChatOptions. - """ - # Validate base_chat_options type if provided - if base_chat_options is not None and not isinstance(base_chat_options, ChatOptions): - raise TypeError("chat_options must be an instance of ChatOptions") - - if base_chat_options is None: - base_chat_options = ChatOptions() - - return base_chat_options & ChatOptions( - model_id=model_id, - allow_multiple_tool_calls=allow_multiple_tool_calls, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - top_p=top_p, - tool_choice=tool_choice, - tools=tools, - user=user, - additional_properties=additional_properties, - ) - - -class BaseChatClient(SerializationMixin, ABC): + +class BaseChatClient(SerializationMixin, ABC, Generic[TOptions_co]): """Base class for chat clients. This abstract base class provides core functionality for chat client implementations, including middleware support, message preparation, and tool normalization. + The generic type parameter TOptions specifies which options TypedDict this client + accepts. This enables IDE autocomplete and type checking for provider-specific options + when using the typed overloads of get_response and get_streaming_response. + Note: BaseChatClient cannot be instantiated directly as it's an abstract base class. Subclasses must implement ``_inner_get_response()`` and ``_inner_get_streaming_response()``. @@ -308,13 +198,13 @@ class BaseChatClient(SerializationMixin, ABC): class CustomChatClient(BaseChatClient): - async def _inner_get_response(self, *, messages, chat_options, **kwargs): + async def _inner_get_response(self, *, messages, options, **kwargs): # Your custom implementation return ChatResponse( messages=[ChatMessage(role="assistant", text="Hello!")], response_id="custom-response" ) - async def _inner_get_streaming_response(self, *, messages, chat_options, **kwargs): + async def _inner_get_streaming_response(self, *, messages, options, **kwargs): # Your custom streaming implementation from agent_framework import ChatResponseUpdate @@ -379,57 +269,6 @@ def to_dict(self, *, exclude: set[str] | None = None, exclude_none: bool = True) return result - def _filter_internal_kwargs(self, kwargs: dict[str, Any]) -> dict[str, Any]: - """Filter out internal framework parameters that shouldn't be passed to chat client implementations. - - Keyword Args: - kwargs: The original kwargs dictionary. - - Returns: - A filtered kwargs dictionary without internal parameters. - """ - return {k: v for k, v in kwargs.items() if not k.startswith("_")} - - @staticmethod - async def _normalize_tools( - tools: ToolProtocol - | MutableMapping[str, Any] - | Callable[..., Any] - | Sequence[ToolProtocol | MutableMapping[str, Any] | Callable[..., Any]] - | None = None, - ) -> list[ToolProtocol | dict[str, Any] | Callable[..., Any]]: - """Normalize tools input to a consistent list format. - - Expands MCP tools to their constituent functions, connecting them if needed. - - Args: - tools: The tools in various supported formats. - - Returns: - A normalized list of tools. 
- """ - from typing import cast - - final_tools: list[ToolProtocol | dict[str, Any] | Callable[..., Any]] = [] - if not tools: - return final_tools - # Use cast when a sequence is passed (likely already a list) - tools_list = ( - cast(list[ToolProtocol | MutableMapping[str, Any] | Callable[..., Any]], tools) - if isinstance(tools, Sequence) and not isinstance(tools, (str, bytes)) - else [tools] - ) - for tool in tools_list: # type: ignore[reportUnknownType] - from ._mcp import MCPTool - - if isinstance(tool, MCPTool): - if not tool.is_connected: - await tool.connect() - final_tools.extend(tool.functions) # type: ignore - continue - final_tools.append(tool) # type: ignore - return final_tools - # region Internal methods to be implemented by the derived classes @abstractmethod @@ -437,14 +276,14 @@ async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: """Send a chat request to the AI service. Keyword Args: messages: The chat messages to send. - chat_options: The options for the request. + options: The options dict for the request. kwargs: Any additional keyword arguments. Returns: @@ -456,14 +295,14 @@ async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: """Send a streaming chat request to the AI service. Keyword Args: messages: The chat messages to send. - chat_options: The chat_options for the request. + options: The options dict for the request. kwargs: Any additional keyword arguments. Yields: @@ -482,222 +321,51 @@ async def _inner_get_streaming_response( async def get_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | ChatMessage | Sequence[str | ChatMessage], *, - allow_multiple_tool_calls: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, - tools: ToolProtocol - | Callable[..., Any] - | MutableMapping[str, Any] - | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None = None, - top_p: float | None = None, - user: str | None = None, - additional_properties: dict[str, Any] | None = None, + options: TOptions_co | None = None, **kwargs: Any, ) -> ChatResponse: """Get a response from a chat client. - When both ``chat_options`` (in kwargs) and individual parameters are provided, - the individual parameters take precedence and override the corresponding values - in ``chat_options``. Tools from both sources are combined into a single list. - Args: messages: The message or messages to send to the model. - - Keyword Args: - allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. 
- presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - Default is `auto`. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_properties: Additional properties to include in the request. - Can be used for provider-specific parameters. - kwargs: Any additional keyword arguments. - May include ``chat_options`` which provides base values that can be overridden by direct parameters. + options: Chat options as a TypedDict. + **kwargs: Other keyword arguments, can be used to pass function specific parameters. Returns: - A chat response from the model_id. + A chat response from the model. """ - # Normalize tools and merge with base chat_options - normalized_tools = await self._normalize_tools(tools) - chat_options = _merge_chat_options( - base_chat_options=kwargs.pop("chat_options", None), - model_id=model_id, - allow_multiple_tool_calls=allow_multiple_tool_calls, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=normalized_tools, - top_p=top_p, - user=user, - additional_properties=additional_properties, + return await self._inner_get_response( + messages=prepare_messages(messages), + options=await validate_chat_options(dict(options) if options else {}), + **kwargs, ) - if chat_options.instructions: - system_msg = ChatMessage(role="system", text=chat_options.instructions) - prepped_messages = [system_msg, *prepare_messages(messages)] - else: - prepped_messages = prepare_messages(messages) - self._prepare_tool_choice(chat_options=chat_options) - - filtered_kwargs = self._filter_internal_kwargs(kwargs) - return await self._inner_get_response(messages=prepped_messages, chat_options=chat_options, **filtered_kwargs) - async def get_streaming_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | ChatMessage | Sequence[str | ChatMessage], *, - allow_multiple_tool_calls: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, - tools: ToolProtocol - | Callable[..., Any] - | MutableMapping[str, Any] - | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None = None, - top_p: float | None = None, - user: str | None = None, - additional_properties: dict[str, Any] | None = None, + options: TOptions_co | None = None, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: """Get a streaming response from a chat client. 
- When both ``chat_options`` (in kwargs) and individual parameters are provided, - the individual parameters take precedence and override the corresponding values - in ``chat_options``. Tools from both sources are combined into a single list. - Args: messages: The message or messages to send to the model. - - Keyword Args: - allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. - presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - Default is `auto`. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_properties: Additional properties to include in the request. - Can be used for provider-specific parameters. - kwargs: Any additional keyword arguments. - May include ``chat_options`` which provides base values that can be overridden by direct parameters. + options: Chat options as a TypedDict. + **kwargs: Other keyword arguments, can be used to pass function specific parameters. Yields: ChatResponseUpdate: A stream representing the response(s) from the LLM. """ - # Normalize tools and merge with base chat_options - normalized_tools = await self._normalize_tools(tools) - chat_options = _merge_chat_options( - base_chat_options=kwargs.pop("chat_options", None), - model_id=model_id, - allow_multiple_tool_calls=allow_multiple_tool_calls, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=normalized_tools, - top_p=top_p, - user=user, - additional_properties=additional_properties, - ) - - if chat_options.instructions: - system_msg = ChatMessage(role="system", text=chat_options.instructions) - prepped_messages = [system_msg, *prepare_messages(messages)] - else: - prepped_messages = prepare_messages(messages) - self._prepare_tool_choice(chat_options=chat_options) - - filtered_kwargs = self._filter_internal_kwargs(kwargs) async for update in self._inner_get_streaming_response( - messages=prepped_messages, chat_options=chat_options, **filtered_kwargs + messages=prepare_messages(messages), + options=await validate_chat_options(dict(options) if options else {}), + **kwargs, ): yield update - def _prepare_tool_choice(self, chat_options: ChatOptions) -> None: - """Prepare the tools and tool choice for the chat options. - - This function should be overridden by subclasses to customize tool handling, - as it currently parses only AIFunctions. - - Args: - chat_options: The chat options to prepare. 
- """ - chat_tool_mode = chat_options.tool_choice - # Explicitly disabled: clear tools and set to NONE - if chat_tool_mode == ToolMode.NONE or chat_tool_mode == "none": - chat_options.tools = None - chat_options.tool_choice = ToolMode.NONE - return - # No tools available: set to NONE regardless of requested mode - if not chat_options.tools: - chat_options.tool_choice = ToolMode.NONE - # Tools available but no explicit mode: default to AUTO - elif chat_tool_mode is None: - chat_options.tool_choice = ToolMode.AUTO - # Tools available with explicit mode: preserve the mode - else: - chat_options.tool_choice = chat_tool_mode - def service_url(self) -> str: """Get the URL of the service. @@ -716,33 +384,17 @@ def create_agent( name: str | None = None, description: str | None = None, instructions: str | None = None, - chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, - context_provider: ContextProvider | None = None, - middleware: Sequence[Middleware] | None = None, - allow_multiple_tool_calls: bool | None = None, - conversation_id: str | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: dict[str, Any] | None = None, - model_id: str | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] | None = None, - top_p: float | None = None, - user: str | None = None, - additional_chat_options: dict[str, Any] | None = None, + default_options: TOptions_co | None = None, + chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, + context_provider: ContextProvider | None = None, + middleware: Sequence[Middleware] | None = None, **kwargs: Any, - ) -> "ChatAgent": + ) -> "ChatAgent[TOptions_co]": """Create a ChatAgent with this client. This is a convenience method that creates a ChatAgent instance with this @@ -754,30 +406,14 @@ def create_agent( description: A brief description of the agent's purpose. instructions: Optional instructions for the agent. These will be put into the messages sent to the chat client service as a system message. + tools: The tools to use for the request. + default_options: A TypedDict containing chat options. When using a typed client like + ``OpenAIChatClient``, this enables IDE autocomplete for provider-specific options + including temperature, max_tokens, model_id, tool_choice, and more. chat_message_store_factory: Factory function to create an instance of ChatMessageStoreProtocol. If not provided, the default in-memory store will be used. - context_provider: Context provider to include during agent invocation. - middleware: List of middleware to intercept chat and function invocations. - allow_multiple_tool_calls: Whether to allow multiple tool calls per agent turn. - conversation_id: The conversation ID to associate with the agent's messages. - frequency_penalty: The frequency penalty to use. - logit_bias: The logit bias to use. - max_tokens: The maximum number of tokens to generate. - metadata: Additional metadata to include in the request. - model_id: The model_id to use for the agent. 
- presence_penalty: The presence penalty to use. - response_format: The format of the response. - seed: The random seed to use. - stop: The stop sequence(s) for the request. - store: Whether to store the response. - temperature: The sampling temperature to use. - tool_choice: The tool choice for the request. - tools: The tools to use for the request. - top_p: The nucleus sampling probability to use. - user: The user to associate with the request. - additional_chat_options: A dictionary of other values that will be passed through - to the chat_client ``get_response`` and ``get_streaming_response`` methods. - This can be used to pass provider specific parameters. + context_provider: Context providers to include during agent invocation. + middleware: List of middleware to intercept agent and function invocations. kwargs: Any additional keyword arguments. Will be stored as ``additional_properties``. Returns: @@ -786,14 +422,16 @@ def create_agent( Examples: .. code-block:: python - from agent_framework.clients import OpenAIChatClient + from agent_framework.openai import OpenAIChatClient # Create a client client = OpenAIChatClient(model_id="gpt-4") # Create an agent using the convenience method agent = client.create_agent( - name="assistant", instructions="You are a helpful assistant.", temperature=0.7 + name="assistant", + instructions="You are a helpful assistant.", + default_options={"temperature": 0.7, "max_tokens": 500}, ) # Run the agent @@ -807,26 +445,10 @@ def create_agent( name=name, description=description, instructions=instructions, + tools=tools, + default_options=default_options, chat_message_store_factory=chat_message_store_factory, context_provider=context_provider, middleware=middleware, - allow_multiple_tool_calls=allow_multiple_tool_calls, - conversation_id=conversation_id, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - model_id=model_id, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_p=top_p, - user=user, - additional_chat_options=additional_chat_options, **kwargs, ) diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py index 00a465f72b..6c19347e2c 100644 --- a/python/packages/core/agent_framework/_middleware.py +++ b/python/packages/core/agent_framework/_middleware.py @@ -2,7 +2,7 @@ import inspect from abc import ABC, abstractmethod -from collections.abc import AsyncIterable, Awaitable, Callable, MutableSequence, Sequence +from collections.abc import AsyncIterable, Awaitable, Callable, Mapping, MutableSequence, Sequence from enum import Enum from functools import update_wrapper from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeAlias, TypedDict, TypeVar @@ -18,7 +18,7 @@ from ._clients import ChatClientProtocol from ._threads import AgentThread from ._tools import AIFunction - from ._types import ChatOptions, ChatResponse, ChatResponseUpdate + from ._types import ChatResponse, ChatResponseUpdate __all__ = [ @@ -38,7 +38,7 @@ ] TAgent = TypeVar("TAgent", bound="AgentProtocol") -TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol") +TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol[Any]") TContext = TypeVar("TContext") @@ -206,7 +206,7 @@ class ChatContext(SerializationMixin): Attributes: chat_client: The chat client being invoked. 
messages: The messages being sent to the chat client. - chat_options: The options for the chat request. + options: The options for the chat request as a dict. is_streaming: Whether this is a streaming invocation. metadata: Metadata dictionary for sharing data between chat middleware. result: Chat execution result. Can be observed after calling ``next()`` @@ -227,7 +227,7 @@ class TokenCounterMiddleware(ChatMiddleware): async def process(self, context: ChatContext, next): print(f"Chat client: {context.chat_client.__class__.__name__}") print(f"Messages: {len(context.messages)}") - print(f"Model: {context.chat_options.model_id}") + print(f"Model: {context.options.get('model_id')}") # Store metadata context.metadata["input_tokens"] = self.count_tokens(context.messages) @@ -246,7 +246,7 @@ def __init__( self, chat_client: "ChatClientProtocol", messages: "MutableSequence[ChatMessage]", - chat_options: "ChatOptions", + options: Mapping[str, Any] | None, is_streaming: bool = False, metadata: dict[str, Any] | None = None, result: "ChatResponse | AsyncIterable[ChatResponseUpdate] | None" = None, @@ -258,7 +258,7 @@ def __init__( Args: chat_client: The chat client being invoked. messages: The messages being sent to the chat client. - chat_options: The options for the chat request. + options: The options for the chat request as a dict. is_streaming: Whether this is a streaming invocation. metadata: Metadata dictionary for sharing data between chat middleware. result: Chat execution result. @@ -267,7 +267,7 @@ def __init__( """ self.chat_client = chat_client self.messages = messages - self.chat_options = chat_options + self.options = options self.is_streaming = is_streaming self.metadata = metadata if metadata is not None else {} self.result = result @@ -974,7 +974,7 @@ async def execute( self, chat_client: "ChatClientProtocol", messages: "MutableSequence[ChatMessage]", - chat_options: "ChatOptions", + options: Mapping[str, Any] | None, context: ChatContext, final_handler: Callable[[ChatContext], Awaitable["ChatResponse"]], **kwargs: Any, @@ -984,7 +984,7 @@ async def execute( Args: chat_client: The chat client being invoked. messages: The messages being sent to the chat client. - chat_options: The options for the chat request. + options: The options for the chat request as a dict. context: The chat invocation context. final_handler: The final handler that performs the actual chat execution. **kwargs: Additional keyword arguments. @@ -995,7 +995,8 @@ async def execute( # Update context with chat client, messages, and options context.chat_client = chat_client context.messages = messages - context.chat_options = chat_options + if options: + context.options = options if not self._middleware: return await final_handler(context) @@ -1023,7 +1024,7 @@ async def execute_stream( self, chat_client: "ChatClientProtocol", messages: "MutableSequence[ChatMessage]", - chat_options: "ChatOptions", + options: Mapping[str, Any] | None, context: ChatContext, final_handler: Callable[[ChatContext], AsyncIterable["ChatResponseUpdate"]], **kwargs: Any, @@ -1033,7 +1034,7 @@ async def execute_stream( Args: chat_client: The chat client being invoked. messages: The messages being sent to the chat client. - chat_options: The options for the chat request. + options: The options for the chat request as a dict. context: The chat invocation context. final_handler: The final handler that performs the actual streaming chat execution. **kwargs: Additional keyword arguments. 
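These middleware hunks swap `ChatContext.chat_options` (a `ChatOptions` instance) for a plain `options` mapping, so middleware now inspects and shares option values through dict access, as the updated `TokenCounterMiddleware` docstring above shows. A hedged sketch of that pattern (the class name is hypothetical and the top-level import path is assumed):

```python
# Hedged sketch of chat middleware reading the new dict-based context.options.
from agent_framework import ChatContext, ChatMiddleware


class OptionsLoggingMiddleware(ChatMiddleware):
    async def process(self, context: ChatContext, next):
        # context.options is a plain mapping (possibly None), so use .get().
        opts = context.options or {}
        print(f"model_id={opts.get('model_id')}, temperature={opts.get('temperature')}")

        # metadata is still a dict shared between middleware in the pipeline.
        context.metadata["seen_option_keys"] = sorted(opts)

        await next(context)
```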
@@ -1044,7 +1045,8 @@ async def execute_stream( # Update context with chat client, messages, and options context.chat_client = chat_client context.messages = messages - context.chat_options = chat_options + if options: + context.options = options context.is_streaming = True if not self._middleware: @@ -1346,6 +1348,8 @@ async def get_streaming_response(self, messages, **kwargs): async def middleware_enabled_get_response( self: Any, messages: Any, + *, + options: Mapping[str, Any] | None = None, **kwargs: Any, ) -> Any: """Middleware-enabled get_response method.""" @@ -1366,30 +1370,35 @@ async def middleware_enabled_get_response( # If no chat middleware, use original method if not chat_middleware_list: - return await original_get_response(self, messages, **kwargs) + return await original_get_response( + self, + messages, + options=options, # type: ignore[arg-type] + **kwargs, + ) # Create pipeline and execute with middleware - from ._types import ChatOptions - - # Extract chat_options or create default - chat_options = kwargs.pop("chat_options", ChatOptions()) - pipeline = ChatMiddlewarePipeline(chat_middleware_list) # type: ignore[arg-type] context = ChatContext( chat_client=self, messages=prepare_messages(messages), - chat_options=chat_options, + options=options, is_streaming=False, kwargs=kwargs, ) async def final_handler(ctx: ChatContext) -> Any: - return await original_get_response(self, list(ctx.messages), chat_options=ctx.chat_options, **ctx.kwargs) + return await original_get_response( + self, + list(ctx.messages), + options=ctx.options, # type: ignore[arg-type] + **ctx.kwargs, + ) return await pipeline.execute( chat_client=self, messages=context.messages, - chat_options=context.chat_options, + options=options, context=context, final_handler=final_handler, **kwargs, @@ -1398,6 +1407,8 @@ async def final_handler(ctx: ChatContext) -> Any: def middleware_enabled_get_streaming_response( self: Any, messages: Any, + *, + options: dict[str, Any] | None = None, **kwargs: Any, ) -> Any: """Middleware-enabled get_streaming_response method.""" @@ -1418,34 +1429,37 @@ async def _stream_generator() -> Any: # If no chat middleware, use original method if not chat_middleware_list: - async for update in original_get_streaming_response(self, messages, **kwargs): + async for update in original_get_streaming_response( + self, + messages, + options=options, # type: ignore[arg-type] + **kwargs, + ): yield update return # Create pipeline and execute with middleware - from ._types import ChatOptions - - # Extract chat_options or create default - chat_options = kwargs.pop("chat_options", ChatOptions()) - pipeline = ChatMiddlewarePipeline(chat_middleware_list) # type: ignore[arg-type] context = ChatContext( chat_client=self, messages=prepare_messages(messages), - chat_options=chat_options, + options=options or {}, is_streaming=True, kwargs=kwargs, ) def final_handler(ctx: ChatContext) -> Any: return original_get_streaming_response( - self, list(ctx.messages), chat_options=ctx.chat_options, **ctx.kwargs + self, + list(ctx.messages), + options=ctx.options, # type: ignore[arg-type] + **ctx.kwargs, ) async for update in pipeline.execute_stream( chat_client=self, messages=context.messages, - chat_options=context.chat_options, + options=options or {}, context=context, final_handler=final_handler, **kwargs, diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 7a0b1672a7..a0d0a13dc2 100644 --- a/python/packages/core/agent_framework/_tools.py +++ 
b/python/packages/core/agent_framework/_tools.py @@ -59,21 +59,12 @@ FunctionCallContent, ) -if sys.version_info >= (3, 12): - from typing import ( - TypedDict, # pragma: no cover - override, # type: ignore # pragma: no cover - ) -else: - from typing_extensions import ( - TypedDict, # pragma: no cover - override, # type: ignore[import] # pragma: no cover - ) +from typing import overload -if sys.version_info >= (3, 11): - from typing import overload # pragma: no cover +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover else: - from typing_extensions import overload # pragma: no cover + from typing_extensions import override # type: ignore[import] # pragma: no cover logger = get_logger() @@ -97,7 +88,7 @@ FUNCTION_INVOKING_CHAT_CLIENT_MARKER: Final[str] = "__function_invoking_chat_client__" DEFAULT_MAX_ITERATIONS: Final[int] = 40 DEFAULT_MAX_CONSECUTIVE_ERRORS_PER_REQUEST: Final[int] = 3 -TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol") +TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol[Any]") # region Helpers ArgsT = TypeVar("ArgsT", bound=BaseModel) @@ -1764,19 +1755,19 @@ def _update_conversation_id(kwargs: dict[str, Any], conversation_id: str | None) kwargs["conversation_id"] = conversation_id -def _extract_tools(kwargs: dict[str, Any]) -> Any: - """Extract tools from kwargs or chat_options. +def _extract_tools(options: dict[str, Any] | None) -> Any: + """Extract tools from options dict. + + Args: + options: The options dict containing chat options. Returns: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] | None """ - from ._types import ChatOptions - - tools = kwargs.get("tools") - if not tools and (chat_options := kwargs.get("chat_options")) and isinstance(chat_options, ChatOptions): - tools = chat_options.tools - return tools + if options and isinstance(options, dict): + return options.get("tools") + return None def _collect_approval_responses( @@ -1869,6 +1860,8 @@ def decorator( async def function_invocation_wrapper( self: "ChatClientProtocol", messages: "str | ChatMessage | list[str] | list[ChatMessage]", + *, + options: dict[str, Any] | None = None, **kwargs: Any, ) -> "ChatResponse": from ._middleware import extract_and_merge_function_middleware @@ -1897,7 +1890,7 @@ async def function_invocation_wrapper( for attempt_idx in range(config.max_iterations if config.enabled else 0): fcc_todo = _collect_approval_responses(prepped_messages) if fcc_todo: - tools = _extract_tools(kwargs) + tools = _extract_tools(options) # Only execute APPROVED function calls, not rejected ones approved_responses = [resp for resp in fcc_todo.values() if resp.approved] approved_function_results: list[Contents] = [] @@ -1929,8 +1922,9 @@ async def function_invocation_wrapper( _replace_approval_contents_with_results(prepped_messages, fcc_todo, approved_function_results) # Filter out internal framework kwargs before passing to clients. - filtered_kwargs = {k: v for k, v in kwargs.items() if k != "thread"} - response = await func(self, messages=prepped_messages, **filtered_kwargs) + # Also exclude tools and tool_choice since they are now in options dict. 
+ filtered_kwargs = {k: v for k, v in kwargs.items() if k not in ("thread", "tools", "tool_choice")} + response = await func(self, messages=prepped_messages, options=options, **filtered_kwargs) # if there are function calls, we will handle them first function_results = { it.call_id for it in response.messages[0].contents if isinstance(it, FunctionResultContent) @@ -1946,7 +1940,7 @@ async def function_invocation_wrapper( prepped_messages = [] # we load the tools here, since middleware might have changed them compared to before calling func. - tools = _extract_tools(kwargs) + tools = _extract_tools(options) if function_calls and tools: # Use the stored middleware pipeline instead of extracting from kwargs # because kwargs may have been modified by the underlying function @@ -2029,11 +2023,13 @@ async def function_invocation_wrapper( return response # Failsafe: give up on tools, ask model for plain answer - kwargs["tool_choice"] = "none" + if options is None: + options = {} + options["tool_choice"] = "none" # Filter out internal framework kwargs before passing to clients. filtered_kwargs = {k: v for k, v in kwargs.items() if k != "thread"} - response = await func(self, messages=prepped_messages, **filtered_kwargs) + response = await func(self, messages=prepped_messages, options=options, **filtered_kwargs) if fcc_messages: for msg in reversed(fcc_messages): response.messages.insert(0, msg) @@ -2065,6 +2061,8 @@ def decorator( async def streaming_function_invocation_wrapper( self: "ChatClientProtocol", messages: "str | ChatMessage | list[str] | list[ChatMessage]", + *, + options: dict[str, Any] | None = None, **kwargs: Any, ) -> AsyncIterable["ChatResponseUpdate"]: """Wrap the inner get streaming response method to handle tool calls.""" @@ -2093,7 +2091,7 @@ async def streaming_function_invocation_wrapper( for attempt_idx in range(config.max_iterations if config.enabled else 0): fcc_todo = _collect_approval_responses(prepped_messages) if fcc_todo: - tools = _extract_tools(kwargs) + tools = _extract_tools(options) # Only execute APPROVED function calls, not rejected ones approved_responses = [resp for resp in fcc_todo.values() if resp.approved] approved_function_results: list[Contents] = [] @@ -2119,7 +2117,7 @@ async def streaming_function_invocation_wrapper( all_updates: list["ChatResponseUpdate"] = [] # Filter out internal framework kwargs before passing to clients. filtered_kwargs = {k: v for k, v in kwargs.items() if k != "thread"} - async for update in func(self, messages=prepped_messages, **filtered_kwargs): + async for update in func(self, messages=prepped_messages, options=options, **filtered_kwargs): all_updates.append(update) yield update @@ -2157,7 +2155,7 @@ async def streaming_function_invocation_wrapper( prepped_messages = [] # we load the tools here, since middleware might have changed them compared to before calling func. - tools = _extract_tools(kwargs) + tools = _extract_tools(options) if function_calls and tools: # Use the stored middleware pipeline instead of extracting from kwargs # because kwargs may have been modified by the underlying function @@ -2236,10 +2234,12 @@ async def streaming_function_invocation_wrapper( return # Failsafe: give up on tools, ask model for plain answer - kwargs["tool_choice"] = "none" + if options is None: + options = {} + options["tool_choice"] = "none" # Filter out internal framework kwargs before passing to clients. 
filtered_kwargs = {k: v for k, v in kwargs.items() if k != "thread"} - async for update in func(self, messages=prepped_messages, **filtered_kwargs): + async for update in func(self, messages=prepped_messages, options=options, **filtered_kwargs): yield update return streaming_function_invocation_wrapper diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index ebe3d23e6f..3df5280e0f 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -13,7 +13,7 @@ Sequence, ) from copy import deepcopy -from typing import Any, ClassVar, Literal, TypeVar, cast, overload +from typing import Any, ClassVar, Literal, TypedDict, TypeVar, cast, overload from pydantic import BaseModel, ValidationError @@ -36,6 +36,7 @@ "BaseAnnotation", "BaseContent", "ChatMessage", + "ChatOptions", # Backward compatibility alias "ChatOptions", "ChatResponse", "ChatResponseUpdate", @@ -64,7 +65,12 @@ "UriContent", "UsageContent", "UsageDetails", + "merge_chat_options", "prepare_function_call_results", + "prepend_instructions_to_messages", + "validate_chat_options", + "validate_tool_mode", + "validate_tools", ] logger = get_logger("agent_framework") @@ -2457,7 +2463,7 @@ def text(self) -> str: def prepare_messages( - messages: str | ChatMessage | list[str] | list[ChatMessage], system_instructions: str | list[str] | None = None + messages: str | ChatMessage | Sequence[str | ChatMessage], system_instructions: str | Sequence[str] | None = None ) -> list[ChatMessage]: """Convert various message input formats into a list of ChatMessage objects. @@ -2488,6 +2494,49 @@ def prepare_messages( return return_messages +def prepend_instructions_to_messages( + messages: list[ChatMessage], + instructions: str | Sequence[str] | None, + role: Role | Literal["system", "user", "assistant"] = "system", +) -> list[ChatMessage]: + """Prepend instructions to a list of messages with a specified role. + + This is a helper method for chat clients that need to add instructions + from options as messages. Different providers support different roles for + instructions (e.g., OpenAI uses "system", some providers might use "user"). + + Args: + messages: The existing list of ChatMessage objects. + instructions: The instructions to prepend. Can be a single string or a sequence of strings. + role: The role to use for the instruction messages. Defaults to "system". + + Returns: + A new list with instruction messages prepended. + + Examples: + .. 
code-block:: python + + from agent_framework import prepend_instructions_to_messages, ChatMessage + + messages = [ChatMessage(role="user", text="Hello")] + instructions = "You are a helpful assistant" + + # Prepend as system message (default) + messages_with_instructions = prepend_instructions_to_messages(messages, instructions) + + # Or use a different role + messages_with_user_instructions = prepend_instructions_to_messages(messages, instructions, role="user") + """ + if instructions is None: + return messages + + if isinstance(instructions, str): + instructions = [instructions] + + instruction_messages = [ChatMessage(role=role, text=instr) for instr in instructions] + return [*instruction_messages, *messages] + + # region ChatResponse @@ -2845,7 +2894,7 @@ async def from_chat_response_generator( cls: type[TChatResponse], updates: AsyncIterable["ChatResponseUpdate"], *, - output_format_type: type[BaseModel] | None = None, + output_format_type: type[BaseModel] | Mapping[str, Any] | None = None, ) -> TChatResponse: """Joins multiple updates into a single ChatResponse. @@ -2870,7 +2919,7 @@ async def from_chat_response_generator( async for update in updates: _process_update(msg, update) _finalize_response(msg) - if output_format_type: + if output_format_type and isinstance(output_format_type, type) and issubclass(output_format_type, BaseModel): msg.try_parse_value(output_format_type) return msg @@ -2884,7 +2933,7 @@ def __str__(self) -> str: def try_parse_value(self, output_format_type: type[BaseModel]) -> None: """If there is a value, does nothing, otherwise tries to parse the text into the value.""" - if self.value is None: + if self.value is None and isinstance(output_format_type, type) and issubclass(output_format_type, BaseModel): try: self.value = output_format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType] except ValidationError as ex: @@ -3301,372 +3350,321 @@ def __str__(self) -> str: # region ChatOptions -class ToolMode(SerializationMixin, metaclass=EnumLike): - """Defines if and how tools are used in a chat request. +class ToolMode(TypedDict, total=False): + """Tool choice mode for the chat options. + + Fields: + mode: One of "auto", "required", or "none". + required_function_name: Optional function name when `mode == "required"`. + """ + + mode: Literal["auto", "required", "none"] + required_function_name: str + + +# region TypedDict-based Chat Options + + +class ChatOptions(TypedDict, total=False): + """Common request settings for AI services as a TypedDict. + + All fields are optional (total=False) to allow partial specification. + Provider-specific TypedDicts extend this with additional options. + + These options represent the common denominator across chat providers. + Individual implementations may raise errors for unsupported options. Examples: .. 
code-block:: python - from agent_framework import ToolMode + from agent_framework import ChatOptions, ToolMode - # Use predefined tool modes - auto_mode = ToolMode.AUTO # Model decides when to use tools - required_mode = ToolMode.REQUIRED_ANY # Model must use a tool - none_mode = ToolMode.NONE # No tools allowed + # Type-safe options + options: ChatOptions = { + "temperature": 0.7, + "max_tokens": 1000, + "model_id": "gpt-4", + } - # Require a specific function - specific_mode = ToolMode.REQUIRED(function_name="get_weather") - print(specific_mode.required_function_name) # "get_weather" - - # Compare modes - print(auto_mode == "auto") # True + # With tools + options_with_tools: ChatOptions = { + "model_id": "gpt-4", + "tool_choice": "auto", + "temperature": 0.7, + } + + # Used with Unpack for function signatures + # async def get_response(self, **options: Unpack[ChatOptions]) -> ChatResponse: """ - # Constants configuration for EnumLike metaclass - _constants: ClassVar[dict[str, tuple[str, ...]]] = { - "AUTO": ("auto",), - "REQUIRED_ANY": ("required",), - "NONE": ("none",), - } + # Model selection + model_id: str - # Type annotations for constants - AUTO: "ToolMode" - REQUIRED_ANY: "ToolMode" - NONE: "ToolMode" + # Generation parameters + temperature: float + top_p: float + max_tokens: int + stop: str | Sequence[str] + seed: int + logit_bias: dict[str | int, float] - def __init__( - self, - mode: Literal["auto", "required", "none"] = "none", - *, - required_function_name: str | None = None, - ) -> None: - """Initialize ToolMode. + # Penalty parameters + frequency_penalty: float + presence_penalty: float - Args: - mode: The tool mode - "auto", "required", or "none". + # Tool configuration (forward reference to avoid circular import) + tools: "ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] | None" # noqa: E501 + tool_choice: ToolMode | Literal["auto", "required", "none"] + allow_multiple_tool_calls: bool - Keyword Args: - required_function_name: Optional function name for required mode. 
- """ - self.mode = mode - self.required_function_name = required_function_name + # Response configuration + response_format: type[BaseModel] | dict[str, Any] - @classmethod - def REQUIRED(cls, function_name: str | None = None) -> "ToolMode": - """Returns a ToolMode that requires the specified function to be called.""" - return cls(mode="required", required_function_name=function_name) + # Metadata + metadata: dict[str, Any] + user: str + store: bool + conversation_id: str - def __eq__(self, other: object) -> bool: - """Checks equality with another ToolMode or string.""" - if isinstance(other, str): - return self.mode == other - if isinstance(other, ToolMode): - return self.mode == other.mode and self.required_function_name == other.required_function_name - return False + # System/instructions + instructions: str - def __hash__(self) -> int: - """Return hash of the ToolMode for use in sets and dicts.""" - return hash((self.mode, self.required_function_name)) - def serialize_model(self) -> str: - """Serializes the ToolMode to just the mode string.""" - return self.mode +# region Chat Options Utility Functions - def __str__(self) -> str: - """Returns the string representation of the mode.""" - return self.mode - def __repr__(self) -> str: - """Returns the string representation of the ToolMode.""" - if self.required_function_name: - return f"ToolMode(mode={self.mode!r}, required_function_name={self.required_function_name!r})" - return f"ToolMode(mode={self.mode!r})" +async def validate_chat_options(options: dict[str, Any]) -> dict[str, Any]: + """Validate and normalize chat options dictionary. + + Validates numeric constraints and converts types as needed. + Args: + options: The options dictionary to validate. -class ChatOptions(SerializationMixin): - """Common request settings for AI services. + Returns: + The validated and normalized options dictionary. + + Raises: + ValueError: If any option value is invalid. Examples: .. 
code-block:: python - from agent_framework import ChatOptions, ai_function + from agent_framework import validate_chat_options - # Create basic chat options - options = ChatOptions( - model_id="gpt-4", - temperature=0.7, - max_tokens=1000, - ) + options = await validate_chat_options({ + "temperature": 0.7, + "max_tokens": 1000, + }) + """ + result = dict(options) # Make a copy + # Validate numeric constraints + if (freq_pen := result.get("frequency_penalty")) is not None: + if not (-2.0 <= freq_pen <= 2.0): + raise ValueError("frequency_penalty must be between -2.0 and 2.0") + result["frequency_penalty"] = float(freq_pen) - # With tools - @ai_function - def get_weather(location: str) -> str: - '''Get weather for a location.''' - return f"Weather in {location}" + if (pres_pen := result.get("presence_penalty")) is not None: + if not (-2.0 <= pres_pen <= 2.0): + raise ValueError("presence_penalty must be between -2.0 and 2.0") + result["presence_penalty"] = float(pres_pen) + if (temp := result.get("temperature")) is not None: + if not (0.0 <= temp <= 2.0): + raise ValueError("temperature must be between 0.0 and 2.0") + result["temperature"] = float(temp) - options = ChatOptions( - model_id="gpt-4", - tools=get_weather, - tool_choice="auto", - ) + if (top_p := result.get("top_p")) is not None: + if not (0.0 <= top_p <= 1.0): + raise ValueError("top_p must be between 0.0 and 1.0") + result["top_p"] = float(top_p) - # Require a specific tool to be called - options_required = ChatOptions( - model_id="gpt-4", - tools=get_weather, - tool_choice=ToolMode.REQUIRED(function_name="get_weather"), - ) + if (max_tokens := result.get("max_tokens")) is not None and max_tokens <= 0: + raise ValueError("max_tokens must be greater than 0") - # Combine options - base_options = ChatOptions(temperature=0.5) - extended_options = ChatOptions(max_tokens=500, tools=get_weather) - combined = base_options & extended_options - """ + # Validate and normalize tools + if "tools" in result: + result["tools"] = await validate_tools(result["tools"]) - DEFAULT_EXCLUDE: ClassVar[set[str]] = {"_tools"} # Internal field, use .tools property + return result - def __init__( - self, - *, - model_id: str | None = None, - allow_multiple_tool_calls: bool | None = None, - conversation_id: str | None = None, - frequency_penalty: float | None = None, - instructions: str | None = None, - logit_bias: MutableMapping[str | int, float] | None = None, - max_tokens: int | None = None, - metadata: MutableMapping[str, str] | None = None, - presence_penalty: float | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - stop: str | Sequence[str] | None = None, - store: bool | None = None, - temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | Mapping[str, Any] | None = None, - tools: ToolProtocol + +async def validate_tools( + tools: ( + ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None = None, - top_p: float | None = None, - user: str | None = None, - additional_properties: MutableMapping[str, Any] | None = None, - **kwargs: Any, - ): - """Initialize ChatOptions. + | None + ), +) -> list[ToolProtocol | MutableMapping[str, Any]]: + """Validate and normalize tools into a list. - Keyword Args: - model_id: The AI model ID to use. - allow_multiple_tool_calls: Whether to allow multiple tool calls. - conversation_id: The conversation ID. 
- frequency_penalty: The frequency penalty (must be between -2.0 and 2.0). - instructions: the instructions, will be turned into a system or equivalent message. - logit_bias: The logit bias mapping. - max_tokens: The maximum number of tokens (must be > 0). - metadata: Metadata mapping. - presence_penalty: The presence penalty (must be between -2.0 and 2.0). - response_format: Structured output response format schema. Must be a valid Pydantic model. - seed: Random seed for reproducibility. - stop: Stop sequences. - store: Whether to store the conversation. - temperature: The temperature (must be between 0.0 and 2.0). - tool_choice: The tool choice mode. - tools: List of available tools. - top_p: The top-p value (must be between 0.0 and 1.0). - user: The user ID. - additional_properties: Provider-specific additional properties, can also be passed as kwargs. - **kwargs: Additional properties to include in additional_properties. - """ - # Validate numeric constraints and convert types as needed - if frequency_penalty is not None: - if not (-2.0 <= frequency_penalty <= 2.0): - raise ValueError("frequency_penalty must be between -2.0 and 2.0") - frequency_penalty = float(frequency_penalty) - if presence_penalty is not None: - if not (-2.0 <= presence_penalty <= 2.0): - raise ValueError("presence_penalty must be between -2.0 and 2.0") - presence_penalty = float(presence_penalty) - if temperature is not None: - if not (0.0 <= temperature <= 2.0): - raise ValueError("temperature must be between 0.0 and 2.0") - temperature = float(temperature) - if top_p is not None: - if not (0.0 <= top_p <= 1.0): - raise ValueError("top_p must be between 0.0 and 1.0") - top_p = float(top_p) - if max_tokens is not None and max_tokens <= 0: - raise ValueError("max_tokens must be greater than 0") - - if additional_properties is None: - additional_properties = {} - if kwargs: - additional_properties.update(kwargs) - - self.additional_properties = cast(dict[str, Any], additional_properties) - self.model_id = model_id - self.allow_multiple_tool_calls = allow_multiple_tool_calls - self.conversation_id = conversation_id - self.frequency_penalty = frequency_penalty - self.instructions = instructions - self.logit_bias = logit_bias - self.max_tokens = max_tokens - self.metadata = metadata - self.presence_penalty = presence_penalty - self.response_format = response_format - self.seed = seed - self.stop = stop - self.store = store - self.temperature = temperature - self.tool_choice = self._validate_tool_mode(tool_choice) - self._tools = self._validate_tools(tools) - self.top_p = top_p - self.user = user - - def __deepcopy__(self, memo: dict[int, Any]) -> "ChatOptions": - """Create a runtime-safe copy without deep-copying tool instances.""" - clone = type(self).__new__(type(self)) - memo[id(self)] = clone - for key, value in self.__dict__.items(): - if key == "_tools": - setattr(clone, key, list(value) if value is not None else None) - continue - if key in {"logit_bias", "metadata", "additional_properties"}: - setattr(clone, key, self._safe_deepcopy_mapping(value, memo)) - continue - setattr(clone, key, self._safe_deepcopy_value(value, memo)) - return clone + Converts callables to AIFunction objects, expands MCP tools to their constituent + functions (connecting them if needed), and ensures all tools are either ToolProtocol + instances or MutableMappings. 
- @staticmethod - def _safe_deepcopy_mapping( - value: MutableMapping[str, Any] | None, memo: dict[int, Any] - ) -> MutableMapping[str, Any] | None: - """Deep copy helper that falls back to a shallow copy for problematic mappings.""" - if value is None: - return None - try: - return deepcopy(value, memo) # type: ignore[arg-type] - except Exception: - return dict(value) + Args: + tools: Tools to validate - can be a single tool, callable, or sequence. - @staticmethod - def _safe_deepcopy_value(value: Any, memo: dict[int, Any]) -> Any: - """Deep copy helper that avoids failing on non-copyable instances.""" - try: - return deepcopy(value, memo) - except Exception: - return value + Returns: + Normalized list of tools, or None if no tools provided. - @property - def tools(self) -> list[ToolProtocol | MutableMapping[str, Any]] | None: - """Return the tools that are specified.""" - return self._tools + Examples: + .. code-block:: python - @tools.setter - def tools( - self, - new_tools: ToolProtocol - | Callable[..., Any] - | MutableMapping[str, Any] - | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None, - ) -> None: - """Set the tools.""" - self._tools = self._validate_tools(new_tools) + from agent_framework import validate_tools, ai_function - @classmethod - def _validate_tools( - cls, - tools: ( - ToolProtocol - | Callable[..., Any] - | MutableMapping[str, Any] - | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] - | None - ), - ) -> list[ToolProtocol | MutableMapping[str, Any]] | None: - """Parse the tools field.""" - if not tools: - return None - if not isinstance(tools, Sequence): - if not isinstance(tools, (ToolProtocol, MutableMapping)): - return [ai_function(tools)] - return [tools] - return [tool if isinstance(tool, (ToolProtocol, MutableMapping)) else ai_function(tool) for tool in tools] - @classmethod - def _validate_tool_mode( - cls, tool_choice: ToolMode | Literal["auto", "required", "none"] | Mapping[str, Any] | None - ) -> ToolMode | None: - """Validates the tool_choice field to ensure it is a valid ToolMode.""" - if not tool_choice: - return None - if isinstance(tool_choice, str): - match tool_choice: - case "auto": - return ToolMode.AUTO - case "required": - return ToolMode.REQUIRED_ANY - case "none": - return ToolMode.NONE - case _: - raise ContentError(f"Invalid tool choice: {tool_choice}") - if isinstance(tool_choice, (dict, Mapping)): - return ToolMode.from_dict(tool_choice) # type: ignore - return tool_choice - - def __and__(self, other: object) -> "ChatOptions": - """Combines two ChatOptions instances. - - The values from the other ChatOptions take precedence. - List and dicts are combined. - """ - if not isinstance(other, ChatOptions): - return self - other_tools = other.tools - # tool_choice has a specialized serialize method. Save it here so we can fix it later. - tool_choice = other.tool_choice or self.tool_choice - # response_format is a class type that can't be serialized. Save it here so we can restore it later. 
- response_format = self.response_format - # Start with a shallow copy of self that preserves tool objects - combined = ChatOptions.from_dict(self.to_dict()) - combined.tool_choice = self.tool_choice - combined.tools = list(self.tools) if self.tools else None - combined.logit_bias = dict(self.logit_bias) if self.logit_bias else None - combined.metadata = dict(self.metadata) if self.metadata else None - combined.response_format = response_format - - # Apply scalar and mapping updates from the other options - updated_data = other.to_dict(exclude_none=True, exclude={"tools"}) - logit_bias = updated_data.pop("logit_bias", {}) - metadata = updated_data.pop("metadata", {}) - additional_properties: dict[str, Any] = updated_data.pop("additional_properties", {}) - - for key, value in updated_data.items(): - setattr(combined, key, value) - - combined.tool_choice = tool_choice - # Preserve response_format from other if it exists, otherwise keep self's - if other.response_format is not None: - combined.response_format = other.response_format - if other.instructions: - combined.instructions = "\n".join([combined.instructions or "", other.instructions or ""]) - - combined.logit_bias = ( - {**(combined.logit_bias or {}), **logit_bias} if logit_bias or combined.logit_bias else None - ) - combined.metadata = {**(combined.metadata or {}), **metadata} if metadata or combined.metadata else None - if combined.additional_properties and additional_properties: - combined.additional_properties.update(additional_properties) + @ai_function + def my_tool(x: int) -> int: + return x * 2 + + + # Single tool + tools = await validate_tools(my_tool) + + # List of tools + tools = await validate_tools([my_tool, another_tool]) + """ + # Sequence of tools - convert callables and expand MCP tools + final_tools: list[ToolProtocol | MutableMapping[str, Any]] = [] + if not tools: + return final_tools + if not isinstance(tools, Sequence) or isinstance(tools, (str, MutableMapping)): + # Single tool (not a sequence, or is a mapping which shouldn't be treated as sequence) + if not isinstance(tools, (ToolProtocol, MutableMapping)): + return [ai_function(tools)] + return [tools] + for tool in tools: + # Import MCPTool here to avoid circular imports + from ._mcp import MCPTool + + if isinstance(tool, MCPTool): + # Expand MCP tools to their constituent functions + if not tool.is_connected: + await tool.connect() + final_tools.extend(tool.functions) # type: ignore + elif isinstance(tool, (ToolProtocol, MutableMapping)): + final_tools.append(tool) else: - if additional_properties: - combined.additional_properties = additional_properties - if other_tools: - if combined.tools is None: - combined.tools = list(other_tools) + # Convert callable to AIFunction + final_tools.append(ai_function(tool)) + return final_tools + + +def validate_tool_mode( + tool_choice: ToolMode | Literal["auto", "required", "none"] | None, +) -> ToolMode: + """Validate and normalize tool_choice to a ToolMode dict. + + Args: + tool_choice: The tool choice value to validate. + + Returns: + A ToolMode dict (contains keys: "mode", and optionally "required_function_name"). + + Raises: + ContentError: If the tool_choice string is invalid. 
+ """ + if not tool_choice: + return {"mode": "none"} + if isinstance(tool_choice, str): + if tool_choice not in ("auto", "required", "none"): + raise ContentError(f"Invalid tool choice: {tool_choice}") + return {"mode": tool_choice} + if "mode" not in tool_choice: + raise ContentError("tool_choice dict must contain 'mode' key") + if tool_choice["mode"] not in ("auto", "required", "none"): + raise ContentError(f"Invalid tool choice: {tool_choice['mode']}") + if tool_choice["mode"] != "required" and "required_function_name" in tool_choice: + raise ContentError("tool_choice with mode other than 'required' cannot have 'required_function_name'") + return tool_choice + + +def merge_chat_options( + base: dict[str, Any] | None, + override: dict[str, Any] | None, +) -> dict[str, Any]: + """Merge two chat options dictionaries. + + Values from override take precedence over base. + Lists and dicts are combined (not replaced). + Instructions are concatenated with newlines. + + Args: + base: The base options dictionary. + override: The override options dictionary. + + Returns: + A new merged options dictionary. + + Examples: + .. code-block:: python + + from agent_framework import merge_chat_options + + base = {"temperature": 0.5, "model_id": "gpt-4"} + override = {"temperature": 0.7, "max_tokens": 1000} + merged = merge_chat_options(base, override) + # {"temperature": 0.7, "model_id": "gpt-4", "max_tokens": 1000} + """ + if not base: + return dict(override) if override else {} + if not override: + return dict(base) + + result: dict[str, Any] = {} + + # Copy base values (shallow copy for simple values, dict copy for dicts) + for key, value in base.items(): + if isinstance(value, dict): + result[key] = dict(value) + elif isinstance(value, list): + result[key] = list(value) + else: + result[key] = value + + # Apply overrides + for key, value in override.items(): + if value is None: + continue + + if key == "instructions": + # Concatenate instructions + base_instructions = result.get("instructions") + if base_instructions: + result["instructions"] = f"{base_instructions}\n{value}" else: - for tool in other_tools: - if tool not in combined.tools: - combined.tools.append(tool) - return combined + result["instructions"] = value + elif key == "tools": + # Merge tools lists + base_tools = result.get("tools") + if base_tools and value: + # Add tools that aren't already present + merged_tools = list(base_tools) + for tool in value if isinstance(value, list) else [value]: + if tool not in merged_tools: + merged_tools.append(tool) + result["tools"] = merged_tools + elif value: + result["tools"] = list(value) if isinstance(value, list) else [value] + elif key in ("logit_bias", "metadata", "additional_properties"): + # Merge dicts + base_dict = result.get(key) + if base_dict and isinstance(value, dict): + result[key] = {**base_dict, **value} + elif value: + result[key] = dict(value) if isinstance(value, dict) else value + elif key == "tool_choice": + # tool_choice from override takes precedence + result["tool_choice"] = value if value else result.get("tool_choice") + elif key == "response_format": + # response_format from override takes precedence if set + result["response_format"] = value + else: + # Simple override + result[key] = value + + return result diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 4e0d2058ad..e469929bcc 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ 
b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -359,7 +359,7 @@ async def _run_agent_streaming(self, ctx: WorkflowContext) -> AgentRunResponse | # Build the final AgentRunResponse from the collected updates if isinstance(self._agent, ChatAgent): - response_format = self._agent.chat_options.response_format + response_format = self._agent.default_options.get("response_format") response = AgentRunResponse.from_agent_run_response_updates( updates, output_format_type=response_format, diff --git a/python/packages/core/agent_framework/_workflows/_function_executor.py b/python/packages/core/agent_framework/_workflows/_function_executor.py index 417a4ee51b..d7b68c10fd 100644 --- a/python/packages/core/agent_framework/_workflows/_function_executor.py +++ b/python/packages/core/agent_framework/_workflows/_function_executor.py @@ -17,13 +17,19 @@ import asyncio import inspect +import sys import typing from collections.abc import Awaitable, Callable -from typing import Any, overload +from typing import Any from ._executor import Executor from ._workflow_context import WorkflowContext, validate_workflow_context_annotation +if sys.version_info >= (3, 11): + from typing import overload # pragma: no cover +else: + from typing_extensions import overload # pragma: no cover + class FunctionExecutor(Executor): """Executor that wraps a user-defined function. diff --git a/python/packages/core/agent_framework/_workflows/_group_chat.py b/python/packages/core/agent_framework/_workflows/_group_chat.py index ed99d5c34d..7381e757ad 100644 --- a/python/packages/core/agent_framework/_workflows/_group_chat.py +++ b/python/packages/core/agent_framework/_workflows/_group_chat.py @@ -1481,16 +1481,20 @@ def set_manager( display_name = manager.id if isinstance(manager, Executor) else manager.name or "manager" # Enforce ManagerSelectionResponse for ChatAgent managers - if isinstance(manager, ChatAgent): - configured_format = manager.chat_options.response_format - if configured_format is None: - manager.chat_options.response_format = ManagerSelectionResponse - elif configured_format is not ManagerSelectionResponse: - configured_format_name = getattr(configured_format, "__name__", str(configured_format)) - raise ValueError( - "Manager ChatAgent response_format must be ManagerSelectionResponse. " - f"Received '{configured_format_name}' for manager '{display_name}'." - ) + if ( + isinstance(manager, ChatAgent) + and manager.default_options.setdefault("response_format", ManagerSelectionResponse) + != ManagerSelectionResponse + ): + configured_format_name = getattr( + manager.default_options.get("response_format"), + "__name__", + str(manager.default_options.get("response_format")), + ) + raise ValueError( + "Manager ChatAgent response_format must be ManagerSelectionResponse. " + f"Received '{configured_format_name}' for manager '{display_name}'." 
+ ) self._manager_participant = manager self._manager_name = display_name diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py index 2dbf3bbb6e..a3f803ae87 100644 --- a/python/packages/core/agent_framework/_workflows/_handoff.py +++ b/python/packages/core/agent_framework/_workflows/_handoff.py @@ -92,20 +92,24 @@ def _handoff_tool(context: str | None = None) -> str: def _clone_chat_agent(agent: ChatAgent) -> ChatAgent: """Produce a deep copy of the ChatAgent while preserving runtime configuration.""" - options = agent.chat_options + options = agent.default_options middleware = list(agent.middleware or []) # Reconstruct the original tools list by combining regular tools with MCP tools. # ChatAgent.__init__ separates MCP tools into _local_mcp_tools during initialization, # so we need to recombine them here to pass the complete tools list to the constructor. # This makes sure MCP tools are preserved when cloning agents for handoff workflows. - all_tools = list(options.tools) if options.tools else [] + tools_from_options = options.get("tools") + all_tools = list(tools_from_options) if tools_from_options else [] if agent._local_mcp_tools: # type: ignore all_tools.extend(agent._local_mcp_tools) # type: ignore + logit_bias = options.get("logit_bias") + metadata = options.get("metadata") + return ChatAgent( chat_client=agent.chat_client, - instructions=options.instructions, + instructions=options.get("instructions"), id=agent.id, name=agent.name, description=agent.description, @@ -114,22 +118,21 @@ def _clone_chat_agent(agent: ChatAgent) -> ChatAgent: middleware=middleware, # Disable parallel tool calls to prevent the agent from invoking multiple handoff tools at once. allow_multiple_tool_calls=False, - frequency_penalty=options.frequency_penalty, - logit_bias=dict(options.logit_bias) if options.logit_bias else None, - max_tokens=options.max_tokens, - metadata=dict(options.metadata) if options.metadata else None, - model_id=options.model_id, - presence_penalty=options.presence_penalty, - response_format=options.response_format, - seed=options.seed, - stop=options.stop, - store=options.store, - temperature=options.temperature, - tool_choice=options.tool_choice, # type: ignore[arg-type] + frequency_penalty=options.get("frequency_penalty"), + logit_bias=dict(logit_bias) if logit_bias else None, + max_tokens=options.get("max_tokens"), + metadata=dict(metadata) if metadata else None, + model_id=options.get("model_id"), + presence_penalty=options.get("presence_penalty"), + response_format=options.get("response_format"), + seed=options.get("seed"), + stop=options.get("stop"), + store=options.get("store"), + temperature=options.get("temperature"), + tool_choice=options.get("tool_choice"), # type: ignore[arg-type] tools=all_tools if all_tools else None, - top_p=options.top_p, - user=options.user, - additional_chat_options=dict(options.additional_properties), + top_p=options.get("top_p"), + user=options.get("user"), ) @@ -1980,8 +1983,8 @@ def _apply_auto_tools(self, agent: ChatAgent, specialists: Mapping[str, Executor Returns: Dict mapping tool names (in various formats) to executor IDs for handoff resolution """ - chat_options = agent.chat_options - existing_tools = list(chat_options.tools or []) + default_options = agent.default_options + existing_tools = list(default_options.get("tools") or []) existing_names = {getattr(tool, "name", "") for tool in existing_tools if hasattr(tool, "name")} tool_targets: dict[str, str] = {} 
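With agent configuration stored in the `default_options` dict, workflow helpers such as `_clone_chat_agent` and `_apply_auto_tools` above fall back to ordinary dict reads and writes instead of attribute access on a `ChatOptions` object. A small, hedged sketch of the same pattern from user code (the client, agent name, and `extra_tool` are illustrative only):

```python
# Hedged sketch: inspecting and extending an agent's default_options dict.
from agent_framework import ai_function
from agent_framework.openai import OpenAIChatClient


@ai_function
def extra_tool(query: str) -> str:
    """Placeholder tool used only for this sketch."""
    return f"looked up: {query}"


agent = OpenAIChatClient(model_id="gpt-4").create_agent(
    name="assistant",
    instructions="You are a helpful assistant.",
    default_options={"temperature": 0.2},
)

# Dict access replaces attribute access on the old chat_options object.
print(agent.default_options.get("temperature"))      # 0.2
print(agent.default_options.get("response_format"))  # None

# Appending a tool mirrors what _apply_auto_tools does above.
existing_tools = list(agent.default_options.get("tools") or [])
agent.default_options["tools"] = [*existing_tools, extra_tool]
```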
@@ -1998,9 +2001,9 @@ def _apply_auto_tools(self, agent: ChatAgent, specialists: Mapping[str, Executor tool_targets[alias.lower()] = executor.id if new_tools: - chat_options.tools = existing_tools + new_tools + default_options["tools"] = existing_tools + new_tools else: - chat_options.tools = existing_tools + default_options["tools"] = existing_tools return tool_targets diff --git a/python/packages/core/agent_framework/_workflows/_magentic.py b/python/packages/core/agent_framework/_workflows/_magentic.py index cdbc79e0c0..4697daba3c 100644 --- a/python/packages/core/agent_framework/_workflows/_magentic.py +++ b/python/packages/core/agent_framework/_workflows/_magentic.py @@ -2432,7 +2432,7 @@ def with_standard_manager( manager_agent = ChatAgent( name="Coordinator", chat_client=OpenAIChatClient(model_id="gpt-4o"), - chat_options=ChatOptions(temperature=0.3, seed=42), + options=ChatOptions(temperature=0.3, seed=42), instructions="Be concise and focus on accuracy", ) diff --git a/python/packages/core/agent_framework/anthropic/__init__.py b/python/packages/core/agent_framework/anthropic/__init__.py index 2f4decc1eb..ea03e6cdf0 100644 --- a/python/packages/core/agent_framework/anthropic/__init__.py +++ b/python/packages/core/agent_framework/anthropic/__init__.py @@ -5,7 +5,7 @@ IMPORT_PATH = "agent_framework_anthropic" PACKAGE_NAME = "agent-framework-anthropic" -_IMPORTS = ["__version__", "AnthropicClient"] +_IMPORTS = ["__version__", "AnthropicClient", "AnthropicChatOptions"] def __getattr__(name: str) -> Any: diff --git a/python/packages/core/agent_framework/anthropic/__init__.pyi b/python/packages/core/agent_framework/anthropic/__init__.pyi index a86586b98f..3d790ebb07 100644 --- a/python/packages/core/agent_framework/anthropic/__init__.pyi +++ b/python/packages/core/agent_framework/anthropic/__init__.pyi @@ -1,11 +1,13 @@ # Copyright (c) Microsoft. All rights reserved. 
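The `_IMPORTS` change above (and the matching stub additions just below) expose the provider-specific options TypedDict alongside the client, so it can be imported from the provider namespace. A short, hedged sketch, assuming `top_k` is one of the Anthropic-specific keys:

```python
# Hedged sketch: importing a provider-specific options TypedDict.
# The concrete keys and values are illustrative; top_k is assumed.
from agent_framework.anthropic import AnthropicChatOptions

options: AnthropicChatOptions = {
    "temperature": 0.5,  # common ChatOptions key
    "top_k": 50,  # Anthropic-specific key (assumed)
}
```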
from agent_framework_anthropic import ( + AnthropicChatOptions, AnthropicClient, __version__, ) __all__ = [ + "AnthropicChatOptions", "AnthropicClient", "__version__", ] diff --git a/python/packages/core/agent_framework/azure/__init__.py b/python/packages/core/agent_framework/azure/__init__.py index 7990361c97..74d1ec81f4 100644 --- a/python/packages/core/agent_framework/azure/__init__.py +++ b/python/packages/core/agent_framework/azure/__init__.py @@ -8,14 +8,19 @@ "AgentFunctionApp": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"), "AgentResponseCallbackProtocol": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"), "AzureAIAgentClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"), + "AzureAIAgentOptions": ("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureAIClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureAISearchContextProvider": ("agent_framework_azure_ai_search", "agent-framework-azure-ai-search"), "AzureAISearchSettings": ("agent_framework_azure_ai_search", "agent-framework-azure-ai-search"), "AzureAISettings": ("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureOpenAIAssistantsClient": ("agent_framework.azure._assistants_client", "agent-framework-core"), + "AzureOpenAIAssistantsOptions": ("agent_framework.azure._assistants_client", "agent-framework-core"), "AzureOpenAIChatClient": ("agent_framework.azure._chat_client", "agent-framework-core"), + "AzureOpenAIChatOptions": ("agent_framework.azure._chat_client", "agent-framework-core"), "AzureOpenAIResponsesClient": ("agent_framework.azure._responses_client", "agent-framework-core"), + "AzureOpenAIResponsesOptions": ("agent_framework.azure._responses_client", "agent-framework-core"), "AzureOpenAISettings": ("agent_framework.azure._shared", "agent-framework-core"), + "AzureUserSecurityContext": ("agent_framework.azure._chat_client", "agent-framework-core"), "DurableAIAgent": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"), "get_entra_auth_token": ("agent_framework.azure._entra_id_authentication", "agent-framework-core"), } diff --git a/python/packages/core/agent_framework/azure/_assistants_client.py b/python/packages/core/agent_framework/azure/_assistants_client.py index 58d2dbe309..a835310435 100644 --- a/python/packages/core/agent_framework/azure/_assistants_client.py +++ b/python/packages/core/agent_framework/azure/_assistants_client.py @@ -1,22 +1,47 @@ # Copyright (c) Microsoft. All rights reserved. 
+import sys from collections.abc import Mapping -from typing import TYPE_CHECKING, Any, ClassVar +from typing import TYPE_CHECKING, Any, ClassVar, Generic from openai.lib.azure import AsyncAzureADTokenProvider, AsyncAzureOpenAI from pydantic import ValidationError from ..exceptions import ServiceInitializationError from ..openai import OpenAIAssistantsClient +from ..openai._assistants_client import OpenAIAssistantsOptions from ._shared import AzureOpenAISettings if TYPE_CHECKING: from azure.core.credentials import TokenCredential +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover + +from typing import TypedDict + __all__ = ["AzureOpenAIAssistantsClient"] -class AzureOpenAIAssistantsClient(OpenAIAssistantsClient): +# region Azure OpenAI Assistants Options TypedDict + + +TAzureOpenAIAssistantsOptions = TypeVar( + "TAzureOpenAIAssistantsOptions", + bound=TypedDict, # type: ignore[valid-type] + default="OpenAIAssistantsOptions", + covariant=True, +) + + +# endregion + + +class AzureOpenAIAssistantsClient( + OpenAIAssistantsClient[TAzureOpenAIAssistantsOptions], Generic[TAzureOpenAIAssistantsOptions] +): """Azure OpenAI Assistants client.""" DEFAULT_AZURE_API_VERSION: ClassVar[str] = "2024-05-01-preview" @@ -95,6 +120,18 @@ def __init__( # Or loading from a .env file client = AzureOpenAIAssistantsClient(env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.azure import AzureOpenAIAssistantsOptions + + + class MyOptions(AzureOpenAIAssistantsOptions, total=False): + my_custom_option: str + + + client: AzureOpenAIAssistantsClient[MyOptions] = AzureOpenAIAssistantsClient() + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: azure_openai_settings = AzureOpenAISettings( diff --git a/python/packages/core/agent_framework/azure/_chat_client.py b/python/packages/core/agent_framework/azure/_chat_client.py index 59f74259a4..248e79ee47 100644 --- a/python/packages/core/agent_framework/azure/_chat_client.py +++ b/python/packages/core/agent_framework/azure/_chat_client.py @@ -4,7 +4,7 @@ import logging import sys from collections.abc import Mapping -from typing import Any, TypeVar +from typing import Any, Generic, TypedDict from azure.core.credentials import TokenCredential from openai.lib.azure import AsyncAzureADTokenProvider, AsyncAzureOpenAI @@ -22,13 +22,17 @@ ) from agent_framework.exceptions import ServiceInitializationError from agent_framework.observability import use_instrumentation -from agent_framework.openai._chat_client import OpenAIBaseChatClient +from agent_framework.openai._chat_client import OpenAIBaseChatClient, OpenAIChatOptions from ._shared import ( AzureOpenAIConfigMixin, AzureOpenAISettings, ) +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: @@ -36,6 +40,99 @@ logger: logging.Logger = logging.getLogger(__name__) +__all__ = ["AzureOpenAIChatClient", "AzureOpenAIChatOptions", "AzureUserSecurityContext"] + + +# region Azure OpenAI Chat Options TypedDict + + +class AzureUserSecurityContext(TypedDict, total=False): + """User security context for Azure AI applications. 
+ + These fields help security operations teams investigate and mitigate security + incidents by providing context about the application and end user. + + Learn more: https://learn.microsoft.com/azure/well-architected/service-guides/cosmos-db + """ + + application_name: str + """Name of the application making the request.""" + + end_user_id: str + """Unique identifier for the end user (recommend hashing username/email).""" + + end_user_tenant_id: str + """Microsoft 365 tenant ID the end user belongs to. Required for multi-tenant apps.""" + + source_ip: str + """The original client's IP address.""" + + +class AzureOpenAIChatOptions(OpenAIChatOptions, total=False): + """Azure OpenAI-specific chat options dict. + + Extends OpenAIChatOptions with Azure-specific options including + the "On Your Data" feature and enhanced security context. + + See: https://learn.microsoft.com/azure/ai-foundry/openai/reference-preview-latest + + Keys: + # Inherited from OpenAIChatOptions/ChatOptions: + model_id: The model to use for the request, + translates to ``model`` in Azure OpenAI API. + temperature: Sampling temperature between 0 and 2. + top_p: Nucleus sampling parameter. + max_tokens: Maximum number of tokens to generate, + translates to ``max_completion_tokens`` in Azure OpenAI API. + stop: Stop sequences. + seed: Random seed for reproducibility. + frequency_penalty: Frequency penalty between -2.0 and 2.0. + presence_penalty: Presence penalty between -2.0 and 2.0. + tools: List of tools (functions) available to the model. + tool_choice: How the model should use tools. + allow_multiple_tool_calls: Whether to allow parallel tool calls, + translates to ``parallel_tool_calls`` in Azure OpenAI API. + response_format: Structured output schema. + metadata: Request metadata for tracking. + user: End-user identifier for abuse monitoring. + store: Whether to store the conversation. + instructions: System instructions for the model. + logit_bias: Token bias values (-100 to 100). + logprobs: Whether to return log probabilities. + top_logprobs: Number of top log probabilities to return (0-20). + + # Azure-specific options: + data_sources: Azure "On Your Data" data sources configuration. + user_security_context: Enhanced security context for Azure Defender. + n: Number of chat completions to generate (not recommended, incurs costs). + """ + + # Azure-specific options + data_sources: list[dict[str, Any]] + """Azure "On Your Data" data sources for retrieval-augmented generation. + + Supported types: azure_search, azure_cosmos_db, elasticsearch, pinecone, mongo_db. + See: https://learn.microsoft.com/azure/ai-foundry/openai/references/on-your-data + """ + + user_security_context: AzureUserSecurityContext + """Enhanced security context for Azure Defender integration.""" + + n: int + """Number of chat completion choices to generate for each input message. + Note: You will be charged based on tokens across all choices. 
Keep n=1 to minimize costs.""" + + +TAzureOpenAIChatOptions = TypeVar( + "TAzureOpenAIChatOptions", + bound=TypedDict, # type: ignore[valid-type] + default="AzureOpenAIChatOptions", + covariant=True, +) + + +# endregion + TChatResponse = TypeVar("TChatResponse", ChatResponse, ChatResponseUpdate) TAzureOpenAIChatClient = TypeVar("TAzureOpenAIChatClient", bound="AzureOpenAIChatClient") @@ -43,7 +140,9 @@ @use_function_invocation @use_instrumentation @use_chat_middleware -class AzureOpenAIChatClient(AzureOpenAIConfigMixin, OpenAIBaseChatClient): +class AzureOpenAIChatClient( + AzureOpenAIConfigMixin, OpenAIBaseChatClient[TAzureOpenAIChatOptions], Generic[TAzureOpenAIChatOptions] +): """Azure OpenAI Chat completion class.""" def __init__( @@ -103,17 +202,31 @@ def __init__( # Using environment variables # Set AZURE_OPENAI_ENDPOINT=https://your-endpoint.openai.azure.com - # Set AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=gpt-4 + # Set AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= # Set AZURE_OPENAI_API_KEY=your-key client = AzureOpenAIChatClient() # Or passing parameters directly client = AzureOpenAIChatClient( - endpoint="https://your-endpoint.openai.azure.com", deployment_name="gpt-4", api_key="your-key" + endpoint="https://your-endpoint.openai.azure.com", + deployment_name="", + api_key="your-key", ) # Or loading from a .env file client = AzureOpenAIChatClient(env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.azure import AzureOpenAIChatOptions + + + class MyOptions(AzureOpenAIChatOptions, total=False): + my_custom_option: str + + + client: AzureOpenAIChatClient[MyOptions] = AzureOpenAIChatClient() + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: # Filter out any None values from the arguments diff --git a/python/packages/core/agent_framework/azure/_responses_client.py b/python/packages/core/agent_framework/azure/_responses_client.py index c967a6d1b8..e4f6989fa0 100644 --- a/python/packages/core/agent_framework/azure/_responses_client.py +++ b/python/packages/core/agent_framework/azure/_responses_client.py @@ -2,7 +2,7 @@ import sys from collections.abc import Mapping -from typing import Any, TypeVar +from typing import TYPE_CHECKING, Any, Generic, TypedDict from urllib.parse import urljoin from azure.core.credentials import TokenCredential @@ -19,18 +19,37 @@ AzureOpenAISettings, ) +if TYPE_CHECKING: + from agent_framework.openai._responses_client import OpenAIResponsesOptions + if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore[import] # pragma: no cover +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover + +__all__ = ["AzureOpenAIResponsesClient"] -TAzureOpenAIResponsesClient = TypeVar("TAzureOpenAIResponsesClient", bound="AzureOpenAIResponsesClient") + +TAzureOpenAIResponsesOptions = TypeVar( + "TAzureOpenAIResponsesOptions", + bound=TypedDict, # type: ignore[valid-type] + default="OpenAIResponsesOptions", + covariant=True, +) @use_function_invocation @use_instrumentation @use_chat_middleware -class AzureOpenAIResponsesClient(AzureOpenAIConfigMixin, OpenAIBaseResponsesClient): +class AzureOpenAIResponsesClient( + AzureOpenAIConfigMixin, + OpenAIBaseResponsesClient[TAzureOpenAIResponsesOptions], + Generic[TAzureOpenAIResponsesOptions], +): """Azure Responses 
completion class.""" def __init__( @@ -101,6 +120,18 @@ def __init__( # Or loading from a .env file client = AzureOpenAIResponsesClient(env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.azure import AzureOpenAIResponsesOptions + + + class MyOptions(AzureOpenAIResponsesOptions, total=False): + my_custom_option: str + + + client: AzureOpenAIResponsesClient[MyOptions] = AzureOpenAIResponsesClient() + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ if model_id := kwargs.pop("model_id", None) and not deployment_name: deployment_name = str(model_id) diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index 2a6a20db14..33f1dac830 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -59,7 +59,7 @@ TAgent = TypeVar("TAgent", bound="AgentProtocol") -TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol") +TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol[Any]") logger = get_logger() @@ -1063,6 +1063,8 @@ def decorator(func: Callable[..., Awaitable["ChatResponse"]]) -> Callable[..., A async def trace_get_response( self: "ChatClientProtocol", messages: "str | ChatMessage | list[str] | list[ChatMessage]", + *, + options: dict[str, Any] | None = None, **kwargs: Any, ) -> "ChatResponse": global OBSERVABILITY_SETTINGS @@ -1071,18 +1073,15 @@ async def trace_get_response( return await func( self, messages=messages, + options=options, **kwargs, ) if "token_usage_histogram" not in self.additional_properties: self.additional_properties["token_usage_histogram"] = _get_token_usage_histogram() if "operation_duration_histogram" not in self.additional_properties: self.additional_properties["operation_duration_histogram"] = _get_duration_histogram() - model_id = ( - kwargs.get("model_id") - or (chat_options.model_id if (chat_options := kwargs.get("chat_options")) else None) - or getattr(self, "model_id", None) - or "unknown" - ) + options = options or {} + model_id = kwargs.get("model_id") or options.get("model_id") or getattr(self, "model_id", None) or "unknown" service_url = str( service_url_func() if (service_url_func := getattr(self, "service_url", None)) and callable(service_url_func) @@ -1101,7 +1100,7 @@ async def trace_get_response( start_time_stamp = perf_counter() end_time_stamp: float | None = None try: - response = await func(self, messages=messages, **kwargs) + response = await func(self, messages=messages, options=options, **kwargs) end_time_stamp = perf_counter() except Exception as exception: end_time_stamp = perf_counter() @@ -1152,12 +1151,16 @@ def decorator( @wraps(func) async def trace_get_streaming_response( - self: "ChatClientProtocol", messages: "str | ChatMessage | list[str] | list[ChatMessage]", **kwargs: Any + self: "ChatClientProtocol", + messages: "str | ChatMessage | list[str] | list[ChatMessage]", + *, + options: dict[str, Any] | None = None, + **kwargs: Any, ) -> AsyncIterable["ChatResponseUpdate"]: global OBSERVABILITY_SETTINGS if not OBSERVABILITY_SETTINGS.ENABLED: # If model diagnostics are not enabled, just return the completion - async for update in func(self, messages=messages, **kwargs): + async for update in func(self, messages=messages, options=options, **kwargs): yield update return if "token_usage_histogram" not in self.additional_properties: @@ -1165,12 +1168,8 @@ async def 
trace_get_streaming_response( if "operation_duration_histogram" not in self.additional_properties: self.additional_properties["operation_duration_histogram"] = _get_duration_histogram() - model_id = ( - kwargs.get("model_id") - or (chat_options.model_id if (chat_options := kwargs.get("chat_options")) else None) - or getattr(self, "model_id", None) - or "unknown" - ) + options = options or {} + model_id = kwargs.get("model_id") or options.get("model_id") or getattr(self, "model_id", None) or "unknown" service_url = str( service_url_func() if (service_url_func := getattr(self, "service_url", None)) and callable(service_url_func) @@ -1194,7 +1193,7 @@ async def trace_get_streaming_response( start_time_stamp = perf_counter() end_time_stamp: float | None = None try: - async for update in func(self, messages=messages, **kwargs): + async for update in func(self, messages=messages, options=options, **kwargs): all_updates.append(update) yield update end_time_stamp = perf_counter() @@ -1341,7 +1340,11 @@ async def trace_run( if not OBSERVABILITY_SETTINGS.ENABLED: # If model diagnostics are not enabled, just return the completion return await run_func(self, messages=messages, thread=thread, **kwargs) - filtered_kwargs = {k: v for k, v in kwargs.items() if k != "chat_options"} + + from ._types import merge_chat_options + + default_options = getattr(self, "default_options", {}) + options = merge_chat_options(default_options, kwargs.get("options", {})) attributes = _get_span_attributes( operation_name=OtelAttr.AGENT_INVOKE_OPERATION, provider_name=provider_name, @@ -1349,8 +1352,8 @@ async def trace_run( agent_name=self.name or self.id, agent_description=self.description, thread_id=thread.service_thread_id if thread else None, - chat_options=getattr(self, "chat_options", None), - **filtered_kwargs, + all_options=options, + **kwargs, ) with _get_span(attributes=attributes, span_name_attribute=OtelAttr.AGENT_NAME) as span: if OBSERVABILITY_SETTINGS.SENSITIVE_DATA_ENABLED and messages: @@ -1358,7 +1361,7 @@ async def trace_run( span=span, provider_name=provider_name, messages=messages, - system_instructions=getattr(getattr(self, "chat_options", None), "instructions", None), + system_instructions=_get_instructions_from_options(options), ) try: response = await run_func(self, messages=messages, thread=thread, **kwargs) @@ -1409,11 +1412,12 @@ async def trace_run_streaming( yield streaming_agent_response return - from ._types import AgentRunResponse + from ._types import AgentRunResponse, merge_chat_options all_updates: list["AgentRunResponseUpdate"] = [] - filtered_kwargs = {k: v for k, v in kwargs.items() if k != "chat_options"} + default_options = getattr(self, "default_options", {}) + options = merge_chat_options(default_options, kwargs.get("options", {})) attributes = _get_span_attributes( operation_name=OtelAttr.AGENT_INVOKE_OPERATION, provider_name=provider_name, @@ -1421,8 +1425,8 @@ async def trace_run_streaming( agent_name=self.name or self.id, agent_description=self.description, thread_id=thread.service_thread_id if thread else None, - chat_options=getattr(self, "chat_options", None), - **filtered_kwargs, + all_options=options, + **kwargs, ) with _get_span(attributes=attributes, span_name_attribute=OtelAttr.AGENT_NAME) as span: if OBSERVABILITY_SETTINGS.SENSITIVE_DATA_ENABLED and messages: @@ -1430,7 +1434,7 @@ async def trace_run_streaming( span=span, provider_name=provider_name, messages=messages, - system_instructions=getattr(getattr(self, "chat_options", None), "instructions", None), + 
system_instructions=_get_instructions_from_options(options), ) try: async for update in run_streaming_func(self, messages=messages, thread=thread, **kwargs): @@ -1586,7 +1590,9 @@ def _get_span( Note: `attributes` must contain the `span_name_attribute` key. """ - span = get_tracer().start_span(f"{attributes[OtelAttr.OPERATION]} {attributes[span_name_attribute]}") + operation = attributes.get(OtelAttr.OPERATION, "operation") + span_name = attributes.get(span_name_attribute, "unknown") + span = get_tracer().start_span(f"{operation} {span_name}") span.set_attributes(attributes) with trace.use_span( span=span, @@ -1597,65 +1603,96 @@ def _get_span( yield current_span +def _get_instructions_from_options(options: Any) -> str | None: + """Extract instructions from options dict.""" + if options is None: + return None + if isinstance(options, dict): + return options.get("instructions") + return None + + +# Mapping configuration for extracting span attributes +# Each entry: source_keys -> (otel_attribute_key, transform_func, check_options_first, default_value) +# - source_keys: single key or list of keys to check (first non-None value wins) +# - otel_attribute_key: target OTEL attribute name +# - transform_func: optional transformation function, can return None to skip attribute +# - check_options_first: whether to check options dict before kwargs +# - default_value: optional default value if key is not found (use None to skip) +OTEL_ATTR_MAP: dict[str | tuple[str, ...], tuple[str, Callable[[Any], Any] | None, bool, Any]] = { + "choice_count": (OtelAttr.CHOICE_COUNT, None, False, 1), + "operation_name": (OtelAttr.OPERATION, None, False, None), + "system_name": (SpanAttributes.LLM_SYSTEM, None, False, None), + "provider_name": (OtelAttr.PROVIDER_NAME, None, False, None), + "service_url": (OtelAttr.ADDRESS, None, False, None), + "conversation_id": (OtelAttr.CONVERSATION_ID, None, True, None), + "seed": (OtelAttr.SEED, None, True, None), + "frequency_penalty": (OtelAttr.FREQUENCY_PENALTY, None, True, None), + "max_tokens": (SpanAttributes.LLM_REQUEST_MAX_TOKENS, None, True, None), + "stop": (OtelAttr.STOP_SEQUENCES, None, True, None), + "temperature": (SpanAttributes.LLM_REQUEST_TEMPERATURE, None, True, None), + "top_p": (SpanAttributes.LLM_REQUEST_TOP_P, None, True, None), + "presence_penalty": (OtelAttr.PRESENCE_PENALTY, None, True, None), + "top_k": (OtelAttr.TOP_K, None, True, None), + "encoding_formats": ( + OtelAttr.ENCODING_FORMATS, + lambda v: json.dumps(v if isinstance(v, list) else [v]), + True, + None, + ), + "agent_id": (OtelAttr.AGENT_ID, None, False, None), + "agent_name": (OtelAttr.AGENT_NAME, None, False, None), + "agent_description": (OtelAttr.AGENT_DESCRIPTION, None, False, None), + # Multiple source keys - checks model_id in options, then model in kwargs, then model_id in kwargs + ("model_id", "model"): (SpanAttributes.LLM_REQUEST_MODEL, None, True, None), + # Tools with validation - returns None if no valid tools + "tools": ( + OtelAttr.TOOL_DEFINITIONS, + lambda tools: ( + json.dumps(tools_dict) + if (tools_dict := __import__("agent_framework._tools", fromlist=["_tools_to_dict"])._tools_to_dict(tools)) + else None + ), + True, + None, + ), + # Error type extraction + "error": (OtelAttr.ERROR_TYPE, lambda e: type(e).__name__, False, None), + # thread_id overrides conversation_id - processed after conversation_id due to dict ordering + "thread_id": (OtelAttr.CONVERSATION_ID, None, False, None), +} + + def _get_span_attributes(**kwargs: Any) -> dict[str, Any]: """Get the span 
attributes from a kwargs dictionary.""" - from ._tools import _tools_to_dict - from ._types import ChatOptions - attributes: dict[str, Any] = {} - chat_options: ChatOptions | None = kwargs.get("chat_options") - if chat_options is None: - chat_options = ChatOptions() - if operation_name := kwargs.get("operation_name"): - attributes[OtelAttr.OPERATION] = operation_name - if choice_count := kwargs.get("choice_count", 1): - attributes[OtelAttr.CHOICE_COUNT] = choice_count - if system_name := kwargs.get("system_name"): - attributes[SpanAttributes.LLM_SYSTEM] = system_name - if provider_name := kwargs.get("provider_name"): - attributes[OtelAttr.PROVIDER_NAME] = provider_name - if model_id := kwargs.get("model", chat_options.model_id): - attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id - if service_url := kwargs.get("service_url"): - attributes[OtelAttr.ADDRESS] = service_url - if conversation_id := kwargs.get("conversation_id", chat_options.conversation_id): - attributes[OtelAttr.CONVERSATION_ID] = conversation_id - if seed := kwargs.get("seed", chat_options.seed): - attributes[OtelAttr.SEED] = seed - if frequency_penalty := kwargs.get("frequency_penalty", chat_options.frequency_penalty): - attributes[OtelAttr.FREQUENCY_PENALTY] = frequency_penalty - if max_tokens := kwargs.get("max_tokens", chat_options.max_tokens): - attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = max_tokens - if stop := kwargs.get("stop", chat_options.stop): - attributes[OtelAttr.STOP_SEQUENCES] = stop - if temperature := kwargs.get("temperature", chat_options.temperature): - attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = temperature - if top_p := kwargs.get("top_p", chat_options.top_p): - attributes[SpanAttributes.LLM_REQUEST_TOP_P] = top_p - if presence_penalty := kwargs.get("presence_penalty", chat_options.presence_penalty): - attributes[OtelAttr.PRESENCE_PENALTY] = presence_penalty - if top_k := kwargs.get("top_k"): - attributes[OtelAttr.TOP_K] = top_k - if encoding_formats := kwargs.get("encoding_formats"): - attributes[OtelAttr.ENCODING_FORMATS] = json.dumps( - encoding_formats if isinstance(encoding_formats, list) else [encoding_formats] - ) - if tools := kwargs.get("tools", chat_options.tools): - tools_as_json_list = _tools_to_dict(tools) - if tools_as_json_list: - attributes[OtelAttr.TOOL_DEFINITIONS] = json.dumps(tools_as_json_list) - if error := kwargs.get("error"): - attributes[OtelAttr.ERROR_TYPE] = type(error).__name__ - # agent attributes - if agent_id := kwargs.get("agent_id"): - attributes[OtelAttr.AGENT_ID] = agent_id - if agent_name := kwargs.get("agent_name"): - attributes[OtelAttr.AGENT_NAME] = agent_name - if agent_description := kwargs.get("agent_description"): - attributes[OtelAttr.AGENT_DESCRIPTION] = agent_description - if thread_id := kwargs.get("thread_id"): - # override if thread is set - attributes[OtelAttr.CONVERSATION_ID] = thread_id + options = kwargs.get("all_options", kwargs.get("options")) + if options is not None and not isinstance(options, dict): + options = None + + for source_keys, (otel_key, transform_func, check_options, default_value) in OTEL_ATTR_MAP.items(): + # Normalize to tuple of keys + keys = (source_keys,) if isinstance(source_keys, str) else source_keys + + value = None + for key in keys: + if check_options and options is not None: + value = options.get(key) + if value is None: + value = kwargs.get(key) + if value is not None: + break + + # Apply default value if no value found + if value is None and default_value is not None: + value = default_value + + if 
value is not None: + result = transform_func(value) if transform_func else value + # Allow transform_func to return None to skip attribute + if result is not None: + attributes[otel_key] = result + return attributes diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index b6f97371b7..94b94c7506 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -2,8 +2,15 @@ import json import sys -from collections.abc import AsyncIterable, Awaitable, Callable, Mapping, MutableMapping, MutableSequence -from typing import Any, cast +from collections.abc import ( + AsyncIterable, + Awaitable, + Callable, + Mapping, + MutableMapping, + MutableSequence, +) +from typing import Any, Generic, Literal, TypedDict, cast from openai import AsyncOpenAI from openai.types.beta.threads import ( @@ -22,7 +29,12 @@ from .._clients import BaseChatClient from .._middleware import use_chat_middleware -from .._tools import AIFunction, HostedCodeInterpreterTool, HostedFileSearchTool, use_function_invocation +from .._tools import ( + AIFunction, + HostedCodeInterpreterTool, + HostedFileSearchTool, + use_function_invocation, +) from .._types import ( ChatMessage, ChatOptions, @@ -35,7 +47,6 @@ MCPServerToolCallContent, Role, TextContent, - ToolMode, UriContent, UsageContent, UsageDetails, @@ -45,19 +56,162 @@ from ..observability import use_instrumentation from ._shared import OpenAIConfigMixin, OpenAISettings +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + if sys.version_info >= (3, 11): from typing import Self # pragma: no cover else: from typing_extensions import Self # pragma: no cover -__all__ = ["OpenAIAssistantsClient"] +__all__ = [ + "AssistantToolResources", + "OpenAIAssistantsClient", + "OpenAIAssistantsOptions", +] + + +# region OpenAI Assistants Options TypedDict + + +class VectorStoreToolResource(TypedDict, total=False): + """Vector store configuration for file search tool resources.""" + + vector_store_ids: list[str] + """IDs of vector stores attached to this assistant.""" + + +class CodeInterpreterToolResource(TypedDict, total=False): + """Code interpreter tool resource configuration.""" + + file_ids: list[str] + """File IDs accessible by the code interpreter tool. Max 20 files per assistant.""" + + +class AssistantToolResources(TypedDict, total=False): + """Tool resources attached to the assistant. + + See: https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-tool_resources + """ + + code_interpreter: CodeInterpreterToolResource + """Resources for code interpreter tool, including file IDs.""" + + file_search: VectorStoreToolResource + """Resources for file search tool, including vector store IDs.""" + + +class OpenAIAssistantsOptions(ChatOptions, total=False): + """OpenAI Assistants API-specific options dict. + + Extends base ChatOptions with Assistants API-specific parameters + for creating and running assistants. + + See: https://platform.openai.com/docs/api-reference/assistants + + Keys: + # Inherited from ChatOptions: + model_id: The model to use for the assistant, + translates to ``model`` in OpenAI API. 
+ temperature: Sampling temperature between 0 and 2. + top_p: Nucleus sampling parameter. + max_tokens: Maximum number of tokens to generate, + translates to ``max_completion_tokens`` in OpenAI API. + tools: List of tools (functions, code_interpreter, file_search). + tool_choice: How the model should use tools. + allow_multiple_tool_calls: Whether to allow parallel tool calls, + translates to ``parallel_tool_calls`` in OpenAI API. + response_format: Structured output schema. + metadata: Request metadata for tracking. + + # Options not supported in Assistants API (inherited but unused): + stop: Not supported. + seed: Not supported (use assistant-level configuration instead). + frequency_penalty: Not supported. + presence_penalty: Not supported. + user: Not supported. + store: Not supported. + + # Assistants-specific options: + name: Name of the assistant. + description: Description of the assistant. + instructions: System instructions for the assistant. + tool_resources: Resources for tools (file IDs, vector stores). + reasoning_effort: Effort level for o-series reasoning models. + conversation_id: Thread ID to continue conversation in. + """ + + # Assistants-specific options + name: str + """Name of the assistant (max 256 characters).""" + + description: str + """Description of the assistant (max 512 characters).""" + + tool_resources: AssistantToolResources + """Tool-specific resources like file IDs and vector stores.""" + + reasoning_effort: Literal["low", "medium", "high"] + """Effort level for o-series reasoning models (o1, o3-mini). + Higher effort = more reasoning time and potentially better results.""" + + conversation_id: str # type: ignore[misc] + """Thread ID to continue a conversation in an existing thread.""" + + # OpenAI/ChatOptions fields not supported in Assistants API + stop: None # type: ignore[misc] + """Not supported in Assistants API.""" + + seed: None # type: ignore[misc] + """Not supported in Assistants API (use assistant-level configuration).""" + + frequency_penalty: None # type: ignore[misc] + """Not supported in Assistants API.""" + + presence_penalty: None # type: ignore[misc] + """Not supported in Assistants API.""" + + user: None # type: ignore[misc] + """Not supported in Assistants API.""" + + store: None # type: ignore[misc] + """Not supported in Assistants API.""" + + +ASSISTANTS_OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "model", + "max_tokens": "max_completion_tokens", + "allow_multiple_tool_calls": "parallel_tool_calls", +} +"""Maps ChatOptions keys to OpenAI Assistants API parameter names.""" + +TOpenAIAssistantsOptions = TypeVar( + "TOpenAIAssistantsOptions", + bound=TypedDict, # type: ignore[valid-type] + default="OpenAIAssistantsOptions", + covariant=True, +) + + +# endregion @use_function_invocation @use_instrumentation @use_chat_middleware -class OpenAIAssistantsClient(OpenAIConfigMixin, BaseChatClient): +class OpenAIAssistantsClient( + OpenAIConfigMixin, + BaseChatClient[TOpenAIAssistantsOptions], + Generic[TOpenAIAssistantsOptions], +): """OpenAI Assistants client.""" def __init__( @@ -118,6 +272,18 @@ def __init__( # Or loading from a .env file client = OpenAIAssistantsClient(env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.openai import OpenAIAssistantsOptions + + + class MyOptions(OpenAIAssistantsOptions, total=False): + my_custom_option: str + + + client: OpenAIAssistantsClient[MyOptions] = OpenAIAssistantsClient(model_id="gpt-4") + response = await 
client.get_response("Hello", options={"my_custom_option": "value"}) """ try: openai_settings = OpenAISettings( @@ -159,7 +325,12 @@ async def __aenter__(self) -> "Self": """Async context manager entry.""" return self - async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any) -> None: + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: Any, + ) -> None: """Async context manager exit - clean up any assistants we created.""" await self.close() @@ -171,34 +342,32 @@ async def close(self) -> None: object.__setattr__(self, "assistant_id", None) object.__setattr__(self, "_should_delete_assistant", False) + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: return await ChatResponse.from_chat_response_generator( - updates=self._inner_get_streaming_response(messages=messages, chat_options=chat_options, **kwargs), - output_format_type=chat_options.response_format, + updates=self._inner_get_streaming_response(messages=messages, options=options, **kwargs), + output_format_type=options.get("response_format"), ) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: # prepare - run_options, tool_results = self._prepare_options(messages, chat_options, **kwargs) + run_options, tool_results = self._prepare_options(messages, options, **kwargs) # Get the thread ID - thread_id: str | None = ( - chat_options.conversation_id - if chat_options.conversation_id is not None - else run_options.get("conversation_id", self.thread_id) - ) + thread_id: str | None = options.get("conversation_id", run_options.get("conversation_id", self.thread_id)) if thread_id is None and tool_results is not None: raise ValueError("No thread ID was provided, but chat messages includes tool results.") @@ -256,7 +425,9 @@ async def _create_assistant_stream( if thread_run is not None and tool_run_id is not None and tool_run_id == thread_run.id and tool_outputs: # There's an active run and we have tool results to submit, so submit the results. 
stream = client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[reportDeprecated] - run_id=tool_run_id, thread_id=thread_run.thread_id, tool_outputs=tool_outputs + run_id=tool_run_id, + thread_id=thread_run.thread_id, + tool_outputs=tool_outputs, ) final_thread_id = thread_run.thread_id else: @@ -408,7 +579,11 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: st function_name = tool_call.function.name function_arguments = json.loads(tool_call.function.arguments) contents.append( - FunctionCallContent(call_id=call_id, name=function_name, arguments=function_arguments) + FunctionCallContent( + call_id=call_id, + name=function_name, + arguments=function_arguments, + ) ) return contents @@ -416,59 +591,75 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: st def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions | None, + options: dict[str, Any], **kwargs: Any, ) -> tuple[dict[str, Any], list[FunctionResultContent] | None]: + from .._types import validate_tool_mode + run_options: dict[str, Any] = {**kwargs} - if chat_options is not None: - run_options["max_completion_tokens"] = chat_options.max_tokens - run_options["model"] = chat_options.model_id - run_options["top_p"] = chat_options.top_p - run_options["temperature"] = chat_options.temperature - - if chat_options.allow_multiple_tool_calls is not None: - run_options["parallel_tool_calls"] = chat_options.allow_multiple_tool_calls - - if chat_options.tool_choice is not None: - tool_definitions: list[MutableMapping[str, Any]] = [] - if chat_options.tool_choice != "none" and chat_options.tools is not None: - for tool in chat_options.tools: - if isinstance(tool, AIFunction): - tool_definitions.append(tool.to_json_schema_spec()) # type: ignore[reportUnknownArgumentType] - elif isinstance(tool, HostedCodeInterpreterTool): - tool_definitions.append({"type": "code_interpreter"}) - elif isinstance(tool, HostedFileSearchTool): - params: dict[str, Any] = { - "type": "file_search", - } - if tool.max_results is not None: - params["max_num_results"] = tool.max_results - tool_definitions.append(params) - elif isinstance(tool, MutableMapping): - tool_definitions.append(tool) - - if len(tool_definitions) > 0: - run_options["tools"] = tool_definitions - - if chat_options.tool_choice == "none" or chat_options.tool_choice == "auto": - run_options["tool_choice"] = chat_options.tool_choice.mode - elif ( - isinstance(chat_options.tool_choice, ToolMode) - and chat_options.tool_choice == "required" - and chat_options.tool_choice.required_function_name is not None - ): - run_options["tool_choice"] = { - "type": "function", - "function": {"name": chat_options.tool_choice.required_function_name}, + # Extract options from the dict + max_tokens = options.get("max_tokens") + model_id = options.get("model_id") + top_p = options.get("top_p") + temperature = options.get("temperature") + allow_multiple_tool_calls = options.get("allow_multiple_tool_calls") + tool_choice = options.get("tool_choice") + tools = options.get("tools") + response_format = options.get("response_format") + + if max_tokens is not None: + run_options["max_completion_tokens"] = max_tokens + if model_id is not None: + run_options["model"] = model_id + if top_p is not None: + run_options["top_p"] = top_p + if temperature is not None: + run_options["temperature"] = temperature + + if allow_multiple_tool_calls is not None: + run_options["parallel_tool_calls"] = allow_multiple_tool_calls + + 
tool_mode = validate_tool_mode(tool_choice) + tool_definitions: list[MutableMapping[str, Any]] = [] + if tool_mode["mode"] != "none" and tools is not None: + for tool in tools: + if isinstance(tool, AIFunction): + tool_definitions.append(tool.to_json_schema_spec()) # type: ignore[reportUnknownArgumentType] + elif isinstance(tool, HostedCodeInterpreterTool): + tool_definitions.append({"type": "code_interpreter"}) + elif isinstance(tool, HostedFileSearchTool): + params: dict[str, Any] = { + "type": "file_search", } + if tool.max_results is not None: + params["max_num_results"] = tool.max_results + tool_definitions.append(params) + elif isinstance(tool, MutableMapping): + tool_definitions.append(tool) + + if len(tool_definitions) > 0: + run_options["tools"] = tool_definitions + + if (mode := tool_mode["mode"]) == "required" and ( + func_name := tool_mode.get("required_function_name") + ) is not None: + run_options["tool_choice"] = { + "type": "function", + "function": {"name": func_name}, + } + else: + run_options["tool_choice"] = mode - if chat_options.response_format is not None: + if response_format is not None: + if isinstance(response_format, dict): + run_options["response_format"] = response_format + else: run_options["response_format"] = { "type": "json_schema", "json_schema": { - "name": chat_options.response_format.__name__, - "schema": chat_options.response_format.model_json_schema(), + "name": response_format.__name__, + "schema": response_format.model_json_schema(), }, } diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index e98f1bed35..2d1ef8b463 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -5,7 +5,7 @@ from collections.abc import AsyncIterable, Awaitable, Callable, Mapping, MutableMapping, MutableSequence, Sequence from datetime import datetime, timezone from itertools import chain -from typing import Any, TypeVar +from typing import Any, Generic, Literal, TypedDict from openai import AsyncOpenAI, BadRequestError from openai.lib._parsing._completions import type_to_response_format_param @@ -49,34 +49,105 @@ from ._exceptions import OpenAIContentFilterException from ._shared import OpenAIBase, OpenAIConfigMixin, OpenAISettings +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore[import] # pragma: no cover -__all__ = ["OpenAIChatClient"] +__all__ = ["OpenAIChatClient", "OpenAIChatOptions"] logger = get_logger("agent_framework.openai") +# region OpenAI Chat Options TypedDict + + +class PredictionTextContent(TypedDict, total=False): + """Prediction text content options for OpenAI Chat completions.""" + + type: Literal["text"] + text: str + + +class Prediction(TypedDict, total=False): + """Prediction options for OpenAI Chat completions.""" + + type: Literal["content"] + content: str | list[PredictionTextContent] + + +class OpenAIChatOptions(ChatOptions, total=False): + """OpenAI-specific chat options dict. + + Extends ChatOptions with options specific to OpenAI's Chat Completions API. + + Keys: + model_id: The model to use for the request, + translates to ``model`` in OpenAI API. + temperature: Sampling temperature between 0 and 2. + top_p: Nucleus sampling parameter. 
+ max_tokens: Maximum number of tokens to generate, + translates to ``max_completion_tokens`` in OpenAI API. + stop: Stop sequences. + seed: Random seed for reproducibility. + frequency_penalty: Frequency penalty between -2.0 and 2.0. + presence_penalty: Presence penalty between -2.0 and 2.0. + tools: List of tools (functions) available to the model. + tool_choice: How the model should use tools. + allow_multiple_tool_calls: Whether to allow parallel tool calls, + translates to ``parallel_tool_calls`` in OpenAI API. + response_format: Structured output schema. + metadata: Request metadata for tracking. + user: End-user identifier for abuse monitoring. + store: Whether to store the conversation. + instructions: System instructions for the model (prepended as system message). + # OpenAI-specific options (supported by all models): + logit_bias: Token bias values (-100 to 100). + logprobs: Whether to return log probabilities. + top_logprobs: Number of top log probabilities to return (0-20). + prediction: Whether to use predicted return tokens. + """ + + # OpenAI-specific generation parameters (supported by all models) + logit_bias: dict[str | int, float] # type: ignore[misc] + logprobs: bool + top_logprobs: int + prediction: Prediction + + +TOpenAIChatOptions = TypeVar("TOpenAIChatOptions", bound=TypedDict, default="OpenAIChatOptions", covariant=True) # type: ignore[valid-type] + +OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "model", + "allow_multiple_tool_calls": "parallel_tool_calls", + "max_tokens": "max_completion_tokens", +} + + # region Base Client -class OpenAIBaseChatClient(OpenAIBase, BaseChatClient): +class OpenAIBaseChatClient(OpenAIBase, BaseChatClient[TOpenAIChatOptions], Generic[TOpenAIChatOptions]): """OpenAI Chat completion class.""" + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: client = await self._ensure_client() # prepare - options_dict = self._prepare_options(messages, chat_options) + options_dict = self._prepare_options(messages, options) try: # execute and process return self._parse_response_from_openai( - await client.chat.completions.create(stream=False, **options_dict), chat_options + await client.chat.completions.create(stream=False, **options_dict), options ) except BadRequestError as ex: if ex.code == "content_filter": @@ -94,16 +165,17 @@ async def _inner_get_response( inner_exception=ex, ) from ex + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: client = await self._ensure_client() # prepare - options_dict = self._prepare_options(messages, chat_options) + options_dict = self._prepare_options(messages, options) options_dict["stream_options"] = {"include_usage": True} try: # execute and process @@ -129,49 +201,45 @@ async def _inner_get_streaming_response( # region content creation - def _prepare_tools_for_openai( - self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] - ) -> list[dict[str, Any]]: + def _prepare_tools_for_openai(self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]]) -> dict[str, Any]: chat_tools: list[dict[str, Any]] = [] + web_search_options: dict[str, Any] | None = None for tool in tools: if isinstance(tool, ToolProtocol): match tool: case AIFunction(): chat_tools.append(tool.to_json_schema_spec()) + case 
HostedWebSearchTool(): + web_search_options = ( + { + "user_location": { + "approximate": tool.additional_properties.get("user_location", None), + "type": "approximate", + } + } + if tool.additional_properties and "user_location" in tool.additional_properties + else {} + ) case _: logger.debug("Unsupported tool passed (type: %s), ignoring", type(tool)) else: chat_tools.append(tool if isinstance(tool, dict) else dict(tool)) - return chat_tools + ret_dict: dict[str, Any] = {} + if chat_tools: + ret_dict["tools"] = chat_tools + if web_search_options is not None: + ret_dict["web_search_options"] = web_search_options + return ret_dict - def _process_web_search_tool( - self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] - ) -> dict[str, Any] | None: - for tool in tools: - if isinstance(tool, HostedWebSearchTool): - # Web search tool requires special handling - return ( - { - "user_location": { - "approximate": tool.additional_properties.get("user_location", None), - "type": "approximate", - } - } - if tool.additional_properties and "user_location" in tool.additional_properties - else {} - ) + def _prepare_options(self, messages: MutableSequence[ChatMessage], options: dict[str, Any]) -> dict[str, Any]: + # Prepend instructions from options if they exist + from .._types import prepend_instructions_to_messages, validate_tool_mode - return None + if instructions := options.get("instructions"): + messages = prepend_instructions_to_messages(list(messages), instructions, role="system") - def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: - run_options = chat_options.to_dict( - exclude={ - "type", - "instructions", # included as system message - "response_format", # handled separately - "additional_properties", # handled separately - } - ) + # Start with a copy of options + run_options = {k: v for k, v in options.items() if v is not None and k not in {"instructions", "tools"}} # messages if messages and "messages" not in run_options: @@ -179,13 +247,8 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: if "messages" not in run_options: raise ServiceInvalidRequestError("Messages are required for chat completions") - # Translation between ChatOptions and Chat Completion API - translations = { - "model_id": "model", - "allow_multiple_tool_calls": "parallel_tool_calls", - "max_tokens": "max_completion_tokens", - } - for old_key, new_key in translations.items(): + # Translation between options keys and Chat Completion API + for old_key, new_key in OPTION_TRANSLATIONS.items(): if old_key in run_options and old_key != new_key: run_options[new_key] = run_options.pop(old_key) @@ -196,32 +259,33 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: run_options["model"] = self.model_id # tools - if chat_options.tools is not None: - # Preprocess web search tool if it exists - if web_search_options := self._process_web_search_tool(chat_options.tools): - run_options["web_search_options"] = web_search_options - run_options["tools"] = self._prepare_tools_for_openai(chat_options.tools) - if not run_options.get("tools", None): - run_options.pop("tools", None) + tools = options.get("tools") + if tools is not None: + run_options.update(self._prepare_tools_for_openai(tools)) + if not run_options.get("tools"): run_options.pop("parallel_tool_calls", None) run_options.pop("tool_choice", None) - # tool_choice: ToolMode serializes to {"type": "tool_mode", "mode": "..."}, extract mode - if 
(tool_choice := run_options.get("tool_choice")) and isinstance(tool_choice, dict) and "mode" in tool_choice: - run_options["tool_choice"] = tool_choice["mode"] + if tool_choice := run_options.pop("tool_choice", None): + tool_mode = validate_tool_mode(tool_choice) + if (mode := tool_mode.get("mode")) == "required" and ( + func_name := tool_mode.get("required_function_name") + ) is not None: + run_options["tool_choice"] = { + "type": "function", + "function": {"name": func_name}, + } + else: + run_options["tool_choice"] = mode # response format - if chat_options.response_format: - run_options["response_format"] = type_to_response_format_param(chat_options.response_format) - - # additional properties - additional_options = { - key: value for key, value in chat_options.additional_properties.items() if value is not None - } - if additional_options: - run_options.update(additional_options) + if response_format := options.get("response_format"): + if isinstance(response_format, dict): + run_options["response_format"] = response_format + else: + run_options["response_format"] = type_to_response_format_param(response_format) return run_options - def _parse_response_from_openai(self, response: ChatCompletion, chat_options: ChatOptions) -> "ChatResponse": + def _parse_response_from_openai(self, response: ChatCompletion, options: dict[str, Any]) -> "ChatResponse": """Parse a response from OpenAI into a ChatResponse.""" response_metadata = self._get_metadata_from_chat_response(response) messages: list[ChatMessage] = [] @@ -246,7 +310,7 @@ def _parse_response_from_openai(self, response: ChatCompletion, chat_options: Ch model_id=response.model, additional_properties=response_metadata, finish_reason=finish_reason, - response_format=chat_options.response_format, + response_format=options.get("response_format"), ) def _parse_response_update_from_openai( @@ -502,13 +566,11 @@ def service_url(self) -> str: # region Public client -TOpenAIChatClient = TypeVar("TOpenAIChatClient", bound="OpenAIChatClient") - @use_function_invocation @use_instrumentation @use_chat_middleware -class OpenAIChatClient(OpenAIConfigMixin, OpenAIBaseChatClient): +class OpenAIChatClient(OpenAIConfigMixin, OpenAIBaseChatClient[TOpenAIChatOptions], Generic[TOpenAIChatOptions]): """OpenAI Chat completion class.""" def __init__( @@ -552,14 +614,26 @@ def __init__( # Using environment variables # Set OPENAI_API_KEY=sk-... - # Set OPENAI_CHAT_MODEL_ID=gpt-4 + # Set OPENAI_CHAT_MODEL_ID= client = OpenAIChatClient() # Or passing parameters directly - client = OpenAIChatClient(model_id="gpt-4", api_key="sk-...") + client = OpenAIChatClient(model_id="", api_key="sk-...") # Or loading from a .env file client = OpenAIChatClient(env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.openai import OpenAIChatOptions + + + class MyOptions(OpenAIChatOptions, total=False): + my_custom_option: str + + + client: OpenAIChatClient[MyOptions] = OpenAIChatClient(model_id="") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: openai_settings = OpenAISettings( diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 5dc3db558b..37a35ae9bc 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
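A similarly hedged sketch of the OpenAI-specific keys defined in `OpenAIChatOptions` above, assuming the default options parameterization; the model name and API key are placeholders, and `OPTION_TRANSLATIONS` renames `model_id` and `max_tokens` inside `_prepare_options`:

```python
# Illustrative sketch; assumes OpenAIChatOptions as defined above and
# placeholder model/key values.
from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions


async def main() -> None:
    client = OpenAIChatClient(model_id="your-model", api_key="sk-your-key")
    options: OpenAIChatOptions = {
        "temperature": 0.2,
        "max_tokens": 256,  # translated to max_completion_tokens via OPTION_TRANSLATIONS
        "logprobs": True,  # OpenAI-specific key
        "top_logprobs": 5,  # OpenAI-specific key
    }
    response = await client.get_response("Hello", options=options)
    print(response)
```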
+import sys from collections.abc import ( AsyncIterable, Awaitable, @@ -11,7 +12,7 @@ ) from datetime import datetime, timezone from itertools import chain -from typing import Any, TypeVar, cast +from typing import Any, Generic, Literal, TypedDict, cast from openai import AsyncOpenAI, BadRequestError from openai.types.responses.file_search_tool_param import FileSearchToolParam @@ -30,9 +31,6 @@ Mcp, ToolParam, ) -from openai.types.responses.web_search_tool_param import ( - UserLocation as WebSearchUserLocation, -) from openai.types.responses.web_search_tool_param import WebSearchToolParam from pydantic import BaseModel, ValidationError @@ -78,6 +76,8 @@ UsageDetails, _parse_content, prepare_function_call_results, + prepend_instructions_to_messages, + validate_tool_mode, ) from ..exceptions import ( ServiceInitializationError, @@ -88,37 +88,150 @@ from ._exceptions import OpenAIContentFilterException from ._shared import OpenAIBase, OpenAIConfigMixin, OpenAISettings +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + logger = get_logger("agent_framework.openai") -__all__ = ["OpenAIResponsesClient"] +__all__ = ["OpenAIResponsesClient", "OpenAIResponsesOptions"] + + +# region OpenAI Responses Options TypedDict + + +class ReasoningOptions(TypedDict, total=False): + """Configuration options for reasoning models (gpt-5, o-series). + + See: https://platform.openai.com/docs/guides/reasoning + """ + + effort: Literal["low", "medium", "high"] + """The effort level for reasoning. Higher effort means more reasoning tokens.""" + + summary: Literal["auto", "concise", "detailed"] + """How to summarize reasoning in the response.""" + + +class StreamOptions(TypedDict, total=False): + """Options for streaming responses.""" + + include_usage: bool + """Whether to include usage statistics in stream events.""" + + +class OpenAIResponsesOptions(ChatOptions, total=False): + """OpenAI Responses API-specific chat options. + + Extends ChatOptions with options specific to OpenAI's Responses API. + These options provide fine-grained control over response generation, + reasoning, and API behavior. + + See: https://platform.openai.com/docs/api-reference/responses/create + """ + + # Responses API-specific parameters + + include: list[str] + """Additional output data to include in the response. + Supported values include: + - 'web_search_call.action.sources' + - 'code_interpreter_call.outputs' + - 'file_search_call.results' + - 'message.input_image.image_url' + - 'message.output_text.logprobs' + - 'reasoning.encrypted_content' + """ + + max_tool_calls: int + """Maximum number of total calls to built-in tools in a response.""" + + prompt: dict[str, Any] + """Reference to a prompt template and its variables. + Learn more: https://platform.openai.com/docs/guides/text#reusable-prompts""" + + prompt_cache_key: str + """Used by OpenAI to cache responses for similar requests. + Replaces the deprecated 'user' field for caching purposes.""" + + prompt_cache_retention: Literal["24h"] + """Retention policy for prompt cache. Set to '24h' for extended caching.""" + + reasoning: ReasoningOptions + """Configuration for reasoning models (gpt-5, o-series). 
+ See: https://platform.openai.com/docs/guides/reasoning""" + + safety_identifier: str + """A stable identifier for detecting policy violations. + Recommend hashing username/email to avoid sending identifying info.""" + + service_tier: Literal["auto", "default", "flex", "priority"] + """Processing type for serving the request. + - 'auto': Use project settings + - 'default': Standard pricing/performance + - 'flex': Flexible processing + - 'priority': Priority processing""" + + stream_options: StreamOptions + """Options for streaming responses. Only set when stream=True.""" + + top_logprobs: int + """Number of most likely tokens (0-20) to return at each position.""" + + truncation: Literal["auto", "disabled"] + """Truncation strategy for model response. + - 'auto': Truncate from beginning if exceeds context + - 'disabled': Fail with 400 error if exceeds context""" + + +TOpenAIResponsesOptions = TypeVar( + "TOpenAIResponsesOptions", + bound=TypedDict, # type: ignore[valid-type] + default="OpenAIResponsesOptions", + covariant=True, +) + + +# endregion + # region ResponsesClient -class OpenAIBaseResponsesClient(OpenAIBase, BaseChatClient): +class OpenAIBaseResponsesClient( + OpenAIBase, + BaseChatClient[TOpenAIResponsesOptions], + Generic[TOpenAIResponsesOptions], +): """Base class for all OpenAI Responses based API's.""" FILE_SEARCH_MAX_RESULTS: int = 50 # region Inner Methods + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: client = await self._ensure_client() # prepare - run_options = await self._prepare_options(messages, chat_options, **kwargs) + run_options = await self._prepare_options(messages, options, **kwargs) try: # execute and process if "text_format" in run_options: response = await client.responses.parse(stream=False, **run_options) else: response = await client.responses.create(stream=False, **run_options) - return self._parse_response_from_openai(response, chat_options=chat_options) + return self._parse_response_from_openai(response, options=options) except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -135,16 +248,17 @@ async def _inner_get_response( inner_exception=ex, ) from ex + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: client = await self._ensure_client() # prepare - run_options = await self._prepare_options(messages, chat_options, **kwargs) + run_options = await self._prepare_options(messages, options, **kwargs) function_call_ids: dict[int, tuple[str, str]] = {} # output_index: (call_id, name) try: # execute and process @@ -152,7 +266,7 @@ async def _inner_get_streaming_response( async for chunk in await client.responses.create(stream=True, **run_options): yield self._parse_chunk_from_openai( chunk, - chat_options=chat_options, + options=options, function_call_ids=function_call_ids, ) return @@ -160,7 +274,7 @@ async def _inner_get_streaming_response( async for chunk in response: yield self._parse_chunk_from_openai( chunk, - chat_options=chat_options, + options=options, function_call_ids=function_call_ids, ) except BadRequestError as ex: @@ -319,25 +433,30 @@ def _prepare_tools_for_openai( ) ) case HostedWebSearchTool(): - location: dict[str, str] | None = ( + web_search_tool = WebSearchToolParam(type="web_search") + if location 
:= ( tool.additional_properties.get("user_location", None) if tool.additional_properties else None - ) - response_tools.append( - WebSearchToolParam( - type="web_search", - user_location=WebSearchUserLocation( - type="approximate", - city=location.get("city", None), - country=location.get("country", None), - region=location.get("region", None), - timezone=location.get("timezone", None), - ) - if location - else None, - ) - ) + ): + web_search_tool["user_location"] = { + "type": "approximate", + "city": location.get("city", None), + "country": location.get("country", None), + "region": location.get("region", None), + "timezone": location.get("timezone", None), + } + if filters := ( + tool.additional_properties.get("filters", None) if tool.additional_properties else None + ): + web_search_tool["filters"] = filters + if search_context_size := ( + tool.additional_properties.get("search_context_size", None) + if tool.additional_properties + else None + ): + web_search_tool["search_context_size"] = search_context_size + response_tools.append(web_search_tool) case HostedImageGenerationTool(): mapped_tool: dict[str, Any] = {"type": "image_generation"} if tool.options: @@ -389,25 +508,29 @@ def _prepare_mcp_tool(tool: HostedMCPTool) -> Mcp: async def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> dict[str, Any]: - """Take ChatOptions and create the specific options for Responses API.""" - run_options: dict[str, Any] = chat_options.to_dict( - exclude={ - "type", - "presence_penalty", # not supported - "frequency_penalty", # not supported - "logit_bias", # not supported - "seed", # not supported - "stop", # not supported - "instructions", # already added as system message - "response_format", # handled separately - "conversation_id", # handled separately - "additional_properties", # handled separately - } - ) + """Take options dict and create the specific options for Responses API.""" + # Exclude keys that are not supported or handled separately + exclude_keys = { + "type", + "presence_penalty", # not supported + "frequency_penalty", # not supported + "logit_bias", # not supported + "seed", # not supported + "stop", # not supported + "instructions", # already added as system message + "response_format", # handled separately + "conversation_id", # handled separately + "tool_choice", # handled separately + } + run_options: dict[str, Any] = {k: v for k, v in options.items() if k not in exclude_keys and v is not None} + # messages + # Handle instructions by prepending to messages as system message + if instructions := options.get("instructions"): + messages = prepend_instructions_to_messages(list(messages), instructions, role="system") request_input = self._prepare_messages_for_openai(messages) if not request_input: raise ServiceInvalidRequestError("Messages are required for chat completions") @@ -416,7 +539,7 @@ async def _prepare_options( # model id self._check_model_presence(run_options) - # translations between ChatOptions and Responses API + # translations between options and Responses API translations = { "model_id": "model", "allow_multiple_tool_calls": "parallel_tool_calls", @@ -428,7 +551,7 @@ async def _prepare_options( run_options[new_key] = run_options.pop(old_key) # Handle different conversation ID formats - if conversation_id := self._get_current_conversation_id(chat_options, **kwargs): + if conversation_id := self._get_current_conversation_id(options, **kwargs): if 
conversation_id.startswith("resp_"): # For response IDs, set previous_response_id and remove conversation property run_options["previous_response_id"] = conversation_id @@ -440,32 +563,27 @@ async def _prepare_options( run_options["previous_response_id"] = conversation_id # tools - if tools := self._prepare_tools_for_openai(chat_options.tools): + if tools := self._prepare_tools_for_openai(options.get("tools")): run_options["tools"] = tools + # tool_choice: convert ToolMode to appropriate format + if tool_choice := options.get("tool_choice"): + tool_mode = validate_tool_mode(tool_choice) + if (mode := tool_mode.get("mode")) == "required" and ( + func_name := tool_mode.get("required_function_name") + ) is not None: + run_options["tool_choice"] = { + "type": "function", + "name": func_name, + } + else: + run_options["tool_choice"] = mode else: run_options.pop("parallel_tool_calls", None) run_options.pop("tool_choice", None) - # tool_choice: ToolMode serializes to {"type": "tool_mode", "mode": "..."}, extract mode - if (tool_choice := run_options.get("tool_choice")) and isinstance(tool_choice, dict) and "mode" in tool_choice: - run_options["tool_choice"] = tool_choice["mode"] - - # additional properties (excluding response_format which is handled separately) - additional_options = { - key: value - for key, value in chat_options.additional_properties.items() - if value is not None and key != "response_format" - } - if additional_options: - run_options.update(additional_options) - - # response format and text config (after additional_properties so user can pass text via additional_properties) - # Check both chat_options.response_format and additional_properties for response_format - response_format: Any = ( - chat_options.response_format - if chat_options.response_format is not None - else chat_options.additional_properties.get("response_format") - ) - text_config: Any = run_options.pop("text", None) + + # response format and text config + response_format = options.get("response_format") + text_config = run_options.pop("text", None) response_format, text_config = self._prepare_response_and_text_format( response_format=response_format, text_config=text_config ) @@ -476,19 +594,19 @@ async def _prepare_options( return run_options - def _check_model_presence(self, run_options: dict[str, Any]) -> None: + def _check_model_presence(self, options: dict[str, Any]) -> None: """Check if the 'model' param is present, and if not raise a Error. Since AzureAIClients use a different param for this, this method is overridden in those clients. """ - if not run_options.get("model"): + if not options.get("model"): if not self.model_id: raise ValueError("model_id must be a non-empty string") - run_options["model"] = self.model_id + options["model"] = self.model_id - def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: - """Get the current conversation ID from chat options or kwargs.""" - return chat_options.conversation_id or kwargs.get("conversation_id") + def _get_current_conversation_id(self, options: dict[str, Any], **kwargs: Any) -> str | None: + """Get the current conversation ID from options dict or kwargs.""" + return options.get("conversation_id") or kwargs.get("conversation_id") def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: """Prepare the chat messages for a request. 
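The `_prepare_options` hunk above replaces the old `ChatOptions`-object plumbing with a plain options dict: unsupported or separately handled keys are filtered via `exclude_keys`, common keys are renamed through the translation table (`model_id` → `model`, `allow_multiple_tool_calls` → `parallel_tool_calls`), and `tool_choice` is normalized for the Responses API — a bare mode string for `"auto"`/`"none"`, or a function-pinning dict when a specific function is required. The sketch below is a minimal, self-contained illustration of that normalization only; `normalize_tool_choice` is a hypothetical stand-in for the framework's `validate_tool_mode` call plus the mapping shown in the hunk, under the assumption that `validate_tool_mode` yields a `{"mode": ..., "required_function_name": ...}` mapping. It is not the actual implementation.

```python
from typing import Any


def normalize_tool_choice(tool_choice: str | dict[str, Any]) -> str | dict[str, Any] | None:
    """Illustrative only: mirror how the new _prepare_options maps a tool_choice
    option onto the Responses API shape (assumed behavior, not framework code)."""
    # Plain mode strings such as "auto", "none", or "required" pass through as-is.
    if isinstance(tool_choice, str):
        return tool_choice
    # Dict values follow the ToolMode shape: {"mode": ..., "required_function_name": ...}.
    mode = tool_choice.get("mode")
    func_name = tool_choice.get("required_function_name")
    if mode == "required" and func_name is not None:
        # Pin the call to one specific function, which is what the Responses API expects.
        return {"type": "function", "name": func_name}
    return mode


# Example: the parametrized integration test later in this diff passes
# {"mode": "required", "required_function_name": "get_weather"},
# which this mapping turns into {"type": "function", "name": "get_weather"}.
```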
@@ -680,7 +798,7 @@ def _prepare_content_for_openai( def _parse_response_from_openai( self, response: OpenAIResponse | ParsedResponse[BaseModel], - chat_options: ChatOptions, + options: dict[str, Any], ) -> "ChatResponse": """Parse an OpenAI Responses API response into a ChatResponse.""" structured_response: BaseModel | None = response.output_parsed if isinstance(response, ParsedResponse) else None # type: ignore[reportUnknownMemberType] @@ -918,20 +1036,22 @@ def _parse_response_from_openai( "raw_representation": response, } - if conversation_id := self._get_conversation_id(response, chat_options.store): + if conversation_id := self._get_conversation_id(response, options.get("store")): args["conversation_id"] = conversation_id if response.usage and (usage_details := self._parse_usage_from_openai(response.usage)): args["usage_details"] = usage_details if structured_response: args["value"] = structured_response - elif chat_options.response_format: - args["response_format"] = chat_options.response_format + elif (response_format := options.get("response_format")) and isinstance(response_format, type): + # Only pass response_format to ChatResponse if it's a Pydantic model type, + # not a runtime JSON schema dict + args["response_format"] = response_format return ChatResponse(**args) def _parse_chunk_from_openai( self, event: OpenAIResponseStreamEvent, - chat_options: ChatOptions, + options: dict[str, Any], function_call_ids: dict[int, tuple[str, str]], ) -> ChatResponseUpdate: """Parse an OpenAI Responses API streaming event into a ChatResponseUpdate.""" @@ -1023,13 +1143,13 @@ def _parse_chunk_from_openai( metadata.update(self._get_metadata_from_response(event)) case "response.created": response_id = event.response.id - conversation_id = self._get_conversation_id(event.response, chat_options.store) + conversation_id = self._get_conversation_id(event.response, options.get("store")) case "response.in_progress": response_id = event.response.id - conversation_id = self._get_conversation_id(event.response, chat_options.store) + conversation_id = self._get_conversation_id(event.response, options.get("store")) case "response.completed": response_id = event.response.id - conversation_id = self._get_conversation_id(event.response, chat_options.store) + conversation_id = self._get_conversation_id(event.response, options.get("store")) model = event.response.model if event.response.usage: usage = self._parse_usage_from_openai(event.response.usage) @@ -1296,13 +1416,14 @@ def _get_metadata_from_response(self, output: Any) -> dict[str, Any]: return {} -TOpenAIResponsesClient = TypeVar("TOpenAIResponsesClient", bound="OpenAIResponsesClient") - - @use_function_invocation @use_instrumentation @use_chat_middleware -class OpenAIResponsesClient(OpenAIConfigMixin, OpenAIBaseResponsesClient): +class OpenAIResponsesClient( + OpenAIConfigMixin, + OpenAIBaseResponsesClient[TOpenAIResponsesOptions], + Generic[TOpenAIResponsesOptions], +): """OpenAI Responses client class.""" def __init__( @@ -1355,6 +1476,18 @@ def __init__( # Or loading from a .env file client = OpenAIResponsesClient(env_file_path="path/to/.env") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework.openai import OpenAIResponsesOptions + + + class MyOptions(OpenAIResponsesOptions, total=False): + my_custom_option: str + + + client: OpenAIResponsesClient[MyOptions] = OpenAIResponsesClient(model_id="gpt-4o") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ try: 
openai_settings = OpenAISettings( diff --git a/python/packages/core/agent_framework/openai/_shared.py b/python/packages/core/agent_framework/openai/_shared.py index 77189168f1..8adc59dddc 100644 --- a/python/packages/core/agent_framework/openai/_shared.py +++ b/python/packages/core/agent_framework/openai/_shared.py @@ -24,7 +24,6 @@ from .._pydantic import AFBaseSettings from .._serialization import SerializationMixin from .._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent -from .._types import ChatOptions from ..exceptions import ServiceInitializationError logger: logging.Logger = get_logger("agent_framework.openai") @@ -43,7 +42,7 @@ _legacy_response.HttpxBinaryResponseContent, ] -OPTION_TYPE = Union[ChatOptions, dict[str, Any]] +OPTION_TYPE = dict[str, Any] __all__ = ["OpenAISettings"] diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py index 758be68d3b..ae4ad0e89b 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -299,8 +299,7 @@ async def test_azure_assistants_client_get_response_tools() -> None: # Test that the client can be used to get a response response = await azure_assistants_client.get_response( messages=messages, - tools=[get_weather], - tool_choice="auto", + options={"tools": [get_weather], "tool_choice": "auto"}, ) assert response is not None @@ -352,8 +351,7 @@ async def test_azure_assistants_client_streaming_tools() -> None: # Test that the client can be used to get a response response = azure_assistants_client.get_streaming_response( messages=messages, - tools=[get_weather], - tool_choice="auto", + options={"tools": [get_weather], "tool_choice": "auto"}, ) full_message: str = "" async for chunk in response: diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 7da838529f..df9c52943a 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -212,7 +212,7 @@ async def test_cmc_with_logit_bias( azure_chat_client = AzureOpenAIChatClient() - await azure_chat_client.get_response(messages=chat_history, logit_bias=token_bias) + await azure_chat_client.get_response(messages=chat_history, options={"logit_bias": token_bias}) mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], @@ -237,7 +237,7 @@ async def test_cmc_with_stop( azure_chat_client = AzureOpenAIChatClient() - await azure_chat_client.get_response(messages=chat_history, stop=stop) + await azure_chat_client.get_response(messages=chat_history, options={"stop": stop}) mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], @@ -300,7 +300,7 @@ async def test_azure_on_your_data( content = await azure_chat_client.get_response( messages=messages_in, - additional_properties={"extra_body": expected_data_settings}, + options={"extra_body": expected_data_settings}, ) assert len(content.messages) == 1 assert len(content.messages[0].contents) == 1 @@ -370,7 +370,7 @@ async def test_azure_on_your_data_string( content = await azure_chat_client.get_response( messages=messages_in, - additional_properties={"extra_body": expected_data_settings}, + options={"extra_body": expected_data_settings}, ) assert len(content.messages) == 1 assert 
len(content.messages[0].contents) == 1 @@ -429,7 +429,7 @@ async def test_azure_on_your_data_fail( content = await azure_chat_client.get_response( messages=messages_in, - additional_properties={"extra_body": expected_data_settings}, + options={"extra_body": expected_data_settings}, ) assert len(content.messages) == 1 assert len(content.messages[0].contents) == 1 @@ -652,8 +652,7 @@ async def test_azure_openai_chat_client_response_tools() -> None: # Test that the client can be used to get a response response = await azure_chat_client.get_response( messages=messages, - tools=[get_story_text], - tool_choice="auto", + options={"tools": [get_story_text], "tool_choice": "auto"}, ) assert response is not None @@ -709,8 +708,7 @@ async def test_azure_openai_chat_client_streaming_tools() -> None: # Test that the client can be used to get a response response = azure_chat_client.get_streaming_response( messages=messages, - tools=[get_story_text], - tool_choice="auto", + options={"tools": [get_story_text], "tool_choice": "auto"}, ) full_message: str = "" async for chunk in response: diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index ec19eaf833..352b7798d3 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -1,26 +1,25 @@ # Copyright (c) Microsoft. All rights reserved. +import json import os -from typing import Annotated +from typing import Annotated, Any import pytest from azure.identity import AzureCliCredential from pydantic import BaseModel +from pytest import param from agent_framework import ( AgentRunResponse, - AgentRunResponseUpdate, - AgentThread, ChatAgent, ChatClientProtocol, ChatMessage, ChatResponse, - ChatResponseUpdate, HostedCodeInterpreterTool, HostedFileSearchTool, HostedMCPTool, HostedVectorStoreContent, - TextContent, + HostedWebSearchTool, ai_function, ) from agent_framework.azure import AzureOpenAIResponsesClient @@ -74,7 +73,7 @@ async def delete_vector_store(client: AzureOpenAIResponsesClient, file_id: str, def test_init(azure_openai_unit_test_env: dict[str, str]) -> None: # Test successful initialization - azure_responses_client = AzureOpenAIResponsesClient() + azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) assert azure_responses_client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] assert isinstance(azure_responses_client, ChatClientProtocol) @@ -141,283 +140,286 @@ def test_serialize(azure_openai_unit_test_env: dict[str, str]) -> None: assert "User-Agent" not in dumped_settings["default_headers"] -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_response() -> None: - """Test azure responses client responses.""" - azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - - assert isinstance(azure_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. 
" - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = await azure_responses_client.get_response(messages=messages) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "scientists" in response.text - - messages.clear() - messages.append(ChatMessage(role="user", text="The weather in New York is sunny")) - messages.append(ChatMessage(role="user", text="What is the weather in New York?")) - - # Test that the client can be used to get a structured response - structured_response = await azure_responses_client.get_response( # type: ignore[reportAssignmentType] - messages=messages, - response_format=OutputStruct, - ) - - assert structured_response is not None - assert isinstance(structured_response, ChatResponse) - assert isinstance(structured_response.value, OutputStruct) - assert structured_response.value.location == "New York" - assert "sunny" in structured_response.value.weather.lower() +# region Integration Tests @pytest.mark.flaky @skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_response_tools() -> None: - """Test azure responses client tools.""" - azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - - assert isinstance(azure_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What is the weather in New York?")) - - # Test that the client can be used to get a response - response = await azure_responses_client.get_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - ) +@pytest.mark.parametrize( + "option_name,option_value,needs_validation", + [ + # Simple ChatOptions - just verify they don't fail + param("temperature", 0.7, False, id="temperature"), + param("top_p", 0.9, False, id="top_p"), + param("max_tokens", 500, False, id="max_tokens"), + param("seed", 123, False, id="seed"), + param("user", "test-user-id", False, id="user"), + param("metadata", {"test_key": "test_value"}, False, id="metadata"), + param("frequency_penalty", 0.5, False, id="frequency_penalty"), + param("presence_penalty", 0.3, False, id="presence_penalty"), + param("stop", ["END"], False, id="stop"), + param("allow_multiple_tool_calls", True, False, id="allow_multiple_tool_calls"), + param("tool_choice", "none", True, id="tool_choice_none"), + # OpenAIResponsesOptions - just verify they don't fail + param("safety_identifier", "user-hash-abc123", False, id="safety_identifier"), + param("truncation", "auto", False, id="truncation"), + param("top_logprobs", 5, False, id="top_logprobs"), + param("prompt_cache_key", "test-cache-key", False, id="prompt_cache_key"), + param("max_tool_calls", 3, False, id="max_tool_calls"), + # Complex options requiring output validation + param("tools", [get_weather], True, id="tools_function"), + param("tool_choice", "auto", True, id="tool_choice_auto"), + param( + "tool_choice", + {"mode": "required", "required_function_name": "get_weather"}, + True, + id="tool_choice_required", + ), + param("response_format", OutputStruct, True, id="response_format_pydantic"), + param( + "response_format", + { + "type": "json_schema", + "json_schema": { + "name": "WeatherDigest", + "strict": True, + "schema": { + 
"title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + }, + }, + }, + True, + id="response_format_runtime_json_schema", + ), + ], +) +async def test_integration_options( + option_name: str, + option_value: Any, + needs_validation: bool, +) -> None: + """Parametrized test covering all ChatOptions and OpenAIResponsesOptions. + + Tests both streaming and non-streaming modes for each option to ensure + they don't cause failures. Options marked with needs_validation also + check that the feature actually works correctly. + """ + client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) + # to ensure toolmode required does not endlessly loop + client.function_invocation_configuration.max_iterations = 1 + + for streaming in [False, True]: + # Prepare test message + if option_name == "tools" or option_name == "tool_choice": + # Use weather-related prompt for tool tests + messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + elif option_name == "response_format": + # Use prompt that works well with structured output + messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + else: + # Generic prompt for simple options + messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + + # Build options dict + options: dict[str, Any] = {option_name: option_value} + + # Add tools if testing tool_choice to avoid errors + if option_name == "tool_choice": + options["tools"] = [get_weather] + + if streaming: + # Test streaming mode + response_gen = client.get_streaming_response( + messages=messages, + options=options, + ) - assert response is not None - assert isinstance(response, ChatResponse) - assert "sunny" in response.text + output_format = option_value if option_name == "response_format" else None + response = await ChatResponse.from_chat_response_generator(response_gen, output_format_type=output_format) + else: + # Test non-streaming mode + response = await client.get_response( + messages=messages, + options=options, + ) - messages.clear() - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" + + # Validate based on option type + if needs_validation: + if option_name == "tools" or option_name == "tool_choice": + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" + elif option_name == "response_format": + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." 
+ response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() - # Test that the client can be used to get a response - structured_response: ChatResponse = await azure_responses_client.get_response( # type: ignore[reportAssignmentType] - messages=messages, - tools=[get_weather], - tool_choice="auto", - response_format=OutputStruct, - ) - assert structured_response is not None - assert isinstance(structured_response, ChatResponse) - assert isinstance(structured_response.value, OutputStruct) - assert "Seattle" in structured_response.value.location - assert "sunny" in structured_response.value.weather.lower() +@pytest.mark.flaky +@skip_if_azure_integration_tests_disabled +async def test_integration_web_search() -> None: + client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) + + for streaming in [False, True]: + content = { + "messages": "Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool()], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text + + # Test that the client will use the web search tool with location + additional_properties = { + "user_location": { + "country": "US", + "city": "Seattle", + } + } + content = { + "messages": "What is the current weather? Do not ask for my current location.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool(additional_properties=additional_properties)], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + assert response.text is not None @pytest.mark.flaky @skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_streaming() -> None: - """Test Azure azure responses client streaming responses.""" +async def test_integration_client_file_search() -> None: + """Test Azure responses client with file search tool.""" azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - - assert isinstance(azure_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", + file_id, vector_store = await create_vector_store(azure_responses_client) + try: + # Test that the client will use the file search tool + response = await azure_responses_client.get_response( + messages=[ + ChatMessage( + role="user", + text="What is the weather today? 
Do a file search to find the answer.", + ) + ], + options={"tools": [HostedFileSearchTool(inputs=vector_store)], "tool_choice": "auto"}, ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - # Test that the client can be used to get a response - response = azure_responses_client.get_streaming_response(messages=messages) - - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - assert "scientists" in full_message - - messages.clear() - messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - - structured_response = await ChatResponse.from_chat_response_generator( - azure_responses_client.get_streaming_response( - messages=messages, - response_format=OutputStruct, - ), - output_format_type=OutputStruct, - ) - assert structured_response is not None - assert isinstance(structured_response, ChatResponse) - assert isinstance(structured_response.value, OutputStruct) - assert "Seattle" in structured_response.value.location - assert "sunny" in structured_response.value.weather.lower() + assert "sunny" in response.text.lower() + assert "75" in response.text + finally: + await delete_vector_store(azure_responses_client, file_id, vector_store.vector_store_id) @pytest.mark.flaky @skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_streaming_tools() -> None: - """Test azure responses client streaming tools.""" +async def test_integration_client_file_search_streaming() -> None: + """Test Azure responses client with file search tool and streaming.""" azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) + file_id, vector_store = await create_vector_store(azure_responses_client) + # Test that the client will use the file search tool + try: + response = azure_responses_client.get_streaming_response( + messages=[ + ChatMessage( + role="user", + text="What is the weather today? 
Do a file search to find the answer.", + ) + ], + options={"tools": [HostedFileSearchTool(inputs=vector_store)], "tool_choice": "auto"}, + ) - assert isinstance(azure_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [ChatMessage(role="user", text="What is the weather in Seattle?")] - - # Test that the client can be used to get a response - response = azure_responses_client.get_streaming_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - ) - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - assert "sunny" in full_message - - messages.clear() - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - - structured_response = azure_responses_client.get_streaming_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - response_format=OutputStruct, - ) - full_message = "" - async for chunk in structured_response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - output = OutputStruct.model_validate_json(full_message) - assert "Seattle" in output.location - assert "sunny" in output.weather.lower() + assert response is not None + full_response = await ChatResponse.from_chat_response_generator(response) + assert "sunny" in full_response.text.lower() + assert "75" in full_response.text + finally: + await delete_vector_store(azure_responses_client, file_id, vector_store.vector_store_id) @pytest.mark.flaky @skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_basic_run(): - """Test Azure Responses Client agent basic run functionality with AzureOpenAIResponsesClient.""" - agent = AzureOpenAIResponsesClient(credential=AzureCliCredential()).create_agent( - instructions="You are a helpful assistant.", +async def test_integration_client_agent_hosted_mcp_tool() -> None: + """Integration test for HostedMCPTool with Azure Response Agent using Microsoft Learn MCP.""" + client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) + response = await client.get_response( + "How to create an Azure storage account using az cli?", + options={ + # this needs to be high enough to handle the full MCP tool response. + "max_tokens": 5000, + "tools": HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + description="A Microsoft Learn MCP server for documentation questions", + approval_mode="never_require", + ), + }, ) - - # Test basic run - response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - assert "hello world" in response.text.lower() - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_basic_run_streaming(): - """Test Azure Responses Client agent basic streaming functionality with AzureOpenAIResponsesClient.""" - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - ) as agent: - # Test streaming run - full_text = "" - async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): - assert isinstance(chunk, AgentRunResponseUpdate) - if chunk.text: - full_text += chunk.text - - assert len(full_text) > 0 - assert "streaming response test" in full_text.lower() - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_thread_persistence(): - """Test Azure Responses Client agent thread persistence across runs with AzureOpenAIResponsesClient.""" - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant with good memory.", - ) as agent: - # Create a new thread that will be reused - thread = agent.get_new_thread() - - # First interaction - first_response = await agent.run("My favorite programming language is Python. Remember this.", thread=thread) - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - - # Second interaction - test memory - second_response = await agent.run("What is my favorite programming language?", thread=thread) - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None + assert isinstance(response, ChatResponse) + assert response.text + # Should contain Azure-related content since it's asking about Azure CLI + assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) @pytest.mark.flaky @skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_thread_storage_with_store_true(): - """Test Azure Responses Client agent with store=True to verify service_thread_id is returned.""" - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant.", - ) as agent: - # Create a new thread - thread = AgentThread() - - # Initially, service_thread_id should be None - assert thread.service_thread_id is None - - # Run with store=True to store messages on Azure/OpenAI side - response = await agent.run( - "Hello! 
Please remember that my name is Alex.", - thread=thread, - store=True, - ) - - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 +async def test_integration_client_agent_hosted_code_interpreter_tool(): + """Test Azure Responses Client agent with HostedCodeInterpreterTool through AzureOpenAIResponsesClient.""" + client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - # After store=True, service_thread_id should be populated - assert thread.service_thread_id is not None - assert isinstance(thread.service_thread_id, str) - assert len(thread.service_thread_id) > 0 + response = await client.get_response( + "Calculate the sum of numbers from 1 to 10 using Python code.", + options={ + "tools": [HostedCodeInterpreterTool()], + }, + ) + # Should contain calculation result (sum of 1-10 = 55) or code execution content + contains_relevant_content = any( + term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] + ) + assert contains_relevant_content or len(response.text.strip()) > 10 @pytest.mark.flaky @skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_existing_thread(): +async def test_integration_client_agent_existing_thread(): """Test Azure Responses Client agent with existing thread to continue conversations across agent instances.""" # First conversation - capture the thread preserved_thread = None @@ -428,7 +430,7 @@ async def test_azure_responses_client_agent_existing_thread(): ) as first_agent: # Start a conversation and capture the thread thread = first_agent.get_new_thread() - first_response = await first_agent.run("My hobby is photography. Remember this.", thread=thread) + first_response = await first_agent.run("My hobby is photography. 
Remember this.", thread=thread, store=True) assert isinstance(first_response, AgentRunResponse) assert first_response.text is not None @@ -448,189 +450,3 @@ async def test_azure_responses_client_agent_existing_thread(): assert isinstance(second_response, AgentRunResponse) assert second_response.text is not None assert "photography" in second_response.text.lower() - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_hosted_code_interpreter_tool(): - """Test Azure Responses Client agent with HostedCodeInterpreterTool through AzureOpenAIResponsesClient.""" - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant that can execute Python code.", - tools=[HostedCodeInterpreterTool()], - ) as agent: - # Test code interpreter functionality - response = await agent.run("Calculate the sum of numbers from 1 to 10 using Python code.") - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain calculation result (sum of 1-10 = 55) or code execution content - contains_relevant_content = any( - term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] - ) - assert contains_relevant_content or len(response.text.strip()) > 10 - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_level_tool_persistence(): - """Test that agent-level tools persist across multiple runs with Azure Responses Client.""" - - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant that uses available tools.", - tools=[get_weather], # Agent-level tool - ) as agent: - # First run - agent-level tool should be available - first_response = await agent.run("What's the weather like in Chicago?") - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the agent-level weather tool - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - agent-level tool should still be available (persistence test) - second_response = await agent.run("What's the weather in Miami?") - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should use the agent-level weather tool again - assert any(term in second_response.text.lower() for term in ["miami", "sunny", "72"]) - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_chat_options_run_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with Azure Response Agent.""" - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant.", - ) as agent: - response = await agent.run( - "Provide a brief, helpful response.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, - user="comprehensive-test-user", - tools=[get_weather], - tool_choice="auto", - ) - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_chat_options_agent_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with 
Azure Response Agent.""" - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, - user="comprehensive-test-user", - tools=[get_weather], - tool_choice="auto", - ) as agent: - response = await agent.run( - "Provide a brief, helpful response.", - ) - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_agent_hosted_mcp_tool() -> None: - """Integration test for HostedMCPTool with Azure Response Agent using Microsoft Learn MCP.""" - - async with ChatAgent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), - instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=HostedMCPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - description="A Microsoft Learn MCP server for documentation questions", - approval_mode="never_require", - ), - ) as agent: - response = await agent.run( - "How to create an Azure storage account using az cli?", - # this needs to be high enough to handle the full MCP tool response. - max_tokens=5000, - ) - - assert isinstance(response, AgentRunResponse) - assert response.text - # Should contain Azure-related content since it's asking about Azure CLI - assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_file_search() -> None: - """Test Azure responses client with file search tool.""" - azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - - assert isinstance(azure_responses_client, ChatClientProtocol) - - file_id, vector_store = await create_vector_store(azure_responses_client) - # Test that the client will use the file search tool - response = await azure_responses_client.get_response( - messages=[ - ChatMessage( - role="user", - text="What is the weather today? Do a file search to find the answer.", - ) - ], - tools=[HostedFileSearchTool(inputs=vector_store)], - tool_choice="auto", - ) - - await delete_vector_store(azure_responses_client, file_id, vector_store.vector_store_id) - assert "sunny" in response.text.lower() - assert "75" in response.text - - -@pytest.mark.flaky -@skip_if_azure_integration_tests_disabled -async def test_azure_responses_client_file_search_streaming() -> None: - """Test Azure responses client with file search tool and streaming.""" - azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - - assert isinstance(azure_responses_client, ChatClientProtocol) - - file_id, vector_store = await create_vector_store(azure_responses_client) - # Test that the client will use the file search tool - response = azure_responses_client.get_streaming_response( - messages=[ - ChatMessage( - role="user", - text="What is the weather today? 
Do a file search to find the answer.", - ) - ], - tools=[HostedFileSearchTool(inputs=vector_store)], - tool_choice="auto", - ) - - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - await delete_vector_store(azure_responses_client, file_id, vector_store.vector_store_id) - - assert "sunny" in full_message.lower() - assert "75" in full_message diff --git a/python/packages/core/tests/core/conftest.py b/python/packages/core/tests/core/conftest.py index ab3aedb7d3..d8a09505af 100644 --- a/python/packages/core/tests/core/conftest.py +++ b/python/packages/core/tests/core/conftest.py @@ -4,7 +4,7 @@ import logging import sys from collections.abc import AsyncIterable, MutableSequence -from typing import Any +from typing import Any, Generic from unittest.mock import patch from uuid import uuid4 @@ -18,7 +18,6 @@ AgentThread, BaseChatClient, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, Role, @@ -28,6 +27,7 @@ use_chat_middleware, use_function_invocation, ) +from agent_framework._clients import TOptions_co if sys.version_info >= (3, 12): from typing import override # type: ignore @@ -113,7 +113,7 @@ async def get_streaming_response( @use_chat_middleware -class MockBaseChatClient(BaseChatClient): +class MockBaseChatClient(BaseChatClient[TOptions_co], Generic[TOptions_co]): """Mock implementation of the BaseChatClient.""" def __init__(self, **kwargs: Any): @@ -127,27 +127,27 @@ async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: """Send a chat request to the AI service. Args: messages: The chat messages to send. - chat_options: The options for the request. + options: The options dict for the request. kwargs: Any additional keyword arguments. Returns: The chat response contents representing the response(s). 
""" - logger.debug(f"Running base chat client inner, with: {messages=}, {chat_options=}, {kwargs=}") + logger.debug(f"Running base chat client inner, with: {messages=}, {options=}, {kwargs=}") self.call_count += 1 if not self.run_responses: return ChatResponse(messages=ChatMessage(role="assistant", text=f"test response - {messages[-1].text}")) response = self.run_responses.pop(0) - if chat_options.tool_choice == "none": + if options.get("tool_choice") == "none": return ChatResponse( messages=ChatMessage( role="assistant", @@ -163,14 +163,14 @@ async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - logger.debug(f"Running base chat client inner stream, with: {messages=}, {chat_options=}, {kwargs=}") + logger.debug(f"Running base chat client inner stream, with: {messages=}, {options=}, {kwargs=}") if not self.streaming_responses: yield ChatResponseUpdate(text=f"update - {messages[0].text}", role="assistant") return - if chat_options.tool_choice == "none": + if options.get("tool_choice") == "none": yield ChatResponseUpdate(text="I broke out of the function invocation loop...", role="assistant") return response = self.streaming_responses.pop(0) diff --git a/python/packages/core/tests/core/test_agents.py b/python/packages/core/tests/core/test_agents.py index 7d0c08e494..a1147f6db8 100644 --- a/python/packages/core/tests/core/test_agents.py +++ b/python/packages/core/tests/core/test_agents.py @@ -118,8 +118,8 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch tool = HostedCodeInterpreterTool() agent = ChatAgent(chat_client=chat_client, tools=[tool]) - assert agent.chat_options.tools is not None - base_tools = agent.chat_options.tools + assert agent.default_options.get("tools") is not None + base_tools = agent.default_options["tools"] thread = agent.get_new_thread() _, prepared_chat_options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] @@ -127,11 +127,11 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch input_messages=[ChatMessage(role=Role.USER, text="Test")], ) - assert prepared_chat_options.tools is not None - assert base_tools is not prepared_chat_options.tools + assert prepared_chat_options.get("tools") is not None + assert base_tools is not prepared_chat_options["tools"] - prepared_chat_options.tools.append(HostedCodeInterpreterTool()) # type: ignore[arg-type] - assert len(agent.chat_options.tools) == 1 + prepared_chat_options["tools"].append(HostedCodeInterpreterTool()) # type: ignore[arg-type] + assert len(agent.default_options["tools"]) == 1 async def test_chat_client_agent_update_thread_id(chat_client_base: ChatClientProtocol) -> None: @@ -597,61 +597,68 @@ async def test_chat_agent_tool_choice_run_level_overrides_agent_level( chat_client_base: Any, ai_function_tool: Any ) -> None: """Verify that tool_choice passed to run() overrides agent-level tool_choice.""" - from agent_framework import ChatOptions, ToolMode - captured_options: list[ChatOptions] = [] + captured_options: list[dict[str, Any]] = [] # Store the original inner method original_inner = chat_client_base._inner_get_response async def capturing_inner( - *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: - captured_options.append(chat_options) - 
return await original_inner(messages=messages, chat_options=chat_options, **kwargs) + captured_options.append(options) + return await original_inner(messages=messages, options=options, **kwargs) chat_client_base._inner_get_response = capturing_inner # Create agent with agent-level tool_choice="auto" and a tool (tools required for tool_choice to be meaningful) - agent = ChatAgent(chat_client=chat_client_base, tool_choice="auto", tools=[ai_function_tool]) + agent = ChatAgent( + chat_client=chat_client_base, + tools=[ai_function_tool], + options={"tool_choice": "auto"}, + ) # Run with run-level tool_choice="required" - await agent.run("Hello", tool_choice="required") + await agent.run("Hello", options={"tool_choice": "required"}) # Verify the client received tool_choice="required", not "auto" assert len(captured_options) >= 1 - assert captured_options[0].tool_choice == "required" - assert captured_options[0].tool_choice == ToolMode.REQUIRED_ANY + assert captured_options[0]["tool_choice"] == "required" async def test_chat_agent_tool_choice_agent_level_used_when_run_level_not_specified( chat_client_base: Any, ai_function_tool: Any ) -> None: """Verify that agent-level tool_choice is used when run() doesn't specify one.""" - from agent_framework import ChatOptions, ToolMode + from agent_framework import ChatOptions captured_options: list[ChatOptions] = [] original_inner = chat_client_base._inner_get_response async def capturing_inner( - *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: - captured_options.append(chat_options) - return await original_inner(messages=messages, chat_options=chat_options, **kwargs) + captured_options.append(options) + return await original_inner(messages=messages, options=options, **kwargs) chat_client_base._inner_get_response = capturing_inner # Create agent with agent-level tool_choice="required" and a tool - agent = ChatAgent(chat_client=chat_client_base, tool_choice="required", tools=[ai_function_tool]) + agent = ChatAgent( + chat_client=chat_client_base, + tools=[ai_function_tool], + default_options={"tool_choice": "required"}, + ) # Run without specifying tool_choice await agent.run("Hello") # Verify the client received tool_choice="required" from agent-level assert len(captured_options) >= 1 - assert captured_options[0].tool_choice == "required" - assert captured_options[0].tool_choice == ToolMode.REQUIRED_ANY + assert captured_options[0]["tool_choice"] == "required" + # older code compared to ToolMode constants; ensure value is 'required' + assert captured_options[0]["tool_choice"] == "required" async def test_chat_agent_tool_choice_none_at_run_preserves_agent_level( @@ -665,19 +672,23 @@ async def test_chat_agent_tool_choice_none_at_run_preserves_agent_level( original_inner = chat_client_base._inner_get_response async def capturing_inner( - *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: - captured_options.append(chat_options) - return await original_inner(messages=messages, chat_options=chat_options, **kwargs) + captured_options.append(options) + return await original_inner(messages=messages, options=options, **kwargs) chat_client_base._inner_get_response = capturing_inner # Create agent with agent-level tool_choice="auto" and a tool - agent = ChatAgent(chat_client=chat_client_base, 
tool_choice="auto", tools=[ai_function_tool]) + agent = ChatAgent( + chat_client=chat_client_base, + tools=[ai_function_tool], + default_options={"tool_choice": "auto"}, + ) # Run with explicitly passing None (same as not specifying) - await agent.run("Hello", tool_choice=None) + await agent.run("Hello", options={"tool_choice": None}) # Verify the client received tool_choice="auto" from agent-level assert len(captured_options) >= 1 - assert captured_options[0].tool_choice == "auto" + assert captured_options[0]["tool_choice"] == "auto" diff --git a/python/packages/core/tests/core/test_chat_agent_integration.py b/python/packages/core/tests/core/test_chat_agent_integration.py new file mode 100644 index 0000000000..331d599aba --- /dev/null +++ b/python/packages/core/tests/core/test_chat_agent_integration.py @@ -0,0 +1,433 @@ +# Copyright (c) Microsoft. All rights reserved. + +import json +import os +from typing import Annotated + +import pytest +from pydantic import BaseModel + +from agent_framework import ( + AgentRunResponse, + AgentRunResponseUpdate, + AgentThread, + ChatAgent, + HostedCodeInterpreterTool, + HostedImageGenerationTool, + HostedMCPTool, + MCPStreamableHTTPTool, + ai_function, +) +from agent_framework.openai import OpenAIResponsesClient + +skip_if_openai_integration_tests_disabled = pytest.mark.skipif( + os.getenv("RUN_INTEGRATION_TESTS", "false").lower() != "true" + or os.getenv("OPENAI_API_KEY", "") in ("", "test-dummy-key"), + reason="No real OPENAI_API_KEY provided; skipping integration tests." + if os.getenv("RUN_INTEGRATION_TESTS", "false").lower() == "true" + else "Integration tests are disabled.", +) + + +@ai_function +async def get_weather(location: Annotated[str, "The location as a city name"]) -> str: + """Get the current weather in a given location.""" + # Implementation of the tool to get weather + return f"The current weather in {location} is sunny." + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_basic_run_streaming(): + """Test OpenAI Responses Client agent basic streaming functionality with OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + ) as agent: + # Test streaming run + full_text = "" + async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): + assert isinstance(chunk, AgentRunResponseUpdate) + if chunk.text: + full_text += chunk.text + + assert len(full_text) > 0 + assert "streaming response test" in full_text.lower() + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_thread_persistence(): + """Test OpenAI Responses Client agent thread persistence across runs with OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant with good memory.", + ) as agent: + # Create a new thread that will be reused + thread = agent.get_new_thread() + + # First interaction + first_response = await agent.run("My favorite programming language is Python. 
Remember this.", thread=thread) + + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + + # Second interaction - test memory + second_response = await agent.run("What is my favorite programming language?", thread=thread) + + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_thread_storage_with_store_true(): + """Test OpenAI Responses Client agent with store=True to verify service_thread_id is returned.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + ) as agent: + # Create a new thread + thread = AgentThread() + + # Initially, service_thread_id should be None + assert thread.service_thread_id is None + + # Run with store=True to store messages on OpenAI side + response = await agent.run( + "Hello! Please remember that my name is Alex.", + thread=thread, + options={"store": True}, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + + # After store=True, service_thread_id should be populated + assert thread.service_thread_id is not None + assert isinstance(thread.service_thread_id, str) + assert len(thread.service_thread_id) > 0 + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_existing_thread(): + """Test OpenAI Responses Client agent with existing thread to continue conversations across agent instances.""" + # First conversation - capture the thread + preserved_thread = None + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant with good memory.", + ) as first_agent: + # Start a conversation and capture the thread + thread = first_agent.get_new_thread() + first_response = await first_agent.run("My hobby is photography. 
Remember this.", thread=thread) + + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + + # Preserve the thread for reuse + preserved_thread = thread + + # Second conversation - reuse the thread in a new agent instance + if preserved_thread: + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant with good memory.", + ) as second_agent: + # Reuse the preserved thread + second_response = await second_agent.run("What is my hobby?", thread=preserved_thread) + + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + assert "photography" in second_response.text.lower() + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_hosted_code_interpreter_tool(): + """Test OpenAI Responses Client agent with HostedCodeInterpreterTool through OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can execute Python code.", + tools=[HostedCodeInterpreterTool()], + ) as agent: + # Test code interpreter functionality + response = await agent.run("Calculate the sum of numbers from 1 to 10 using Python code.") + + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + # Should contain calculation result (sum of 1-10 = 55) or code execution content + contains_relevant_content = any( + term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] + ) + assert contains_relevant_content or len(response.text.strip()) > 10 + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_image_generation_tool(): + """Test OpenAI Responses Client agent with raw image_generation tool through OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can generate images.", + tools=HostedImageGenerationTool(options={"image_size": "1024x1024", "media_type": "png"}), + ) as agent: + # Test image generation functionality + response = await agent.run("Generate an image of a cute red panda sitting on a tree branch in a forest.") + + assert isinstance(response, AgentRunResponse) + assert response.messages + + # Verify we got image content - look for ImageGenerationToolResultContent + image_content_found = False + for message in response.messages: + for content in message.contents: + if content.type == "image_generation_tool_result" and content.outputs: + image_content_found = True + break + if image_content_found: + break + + # The test passes if we got image content + assert image_content_found, "Expected to find image content in response" + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_level_tool_persistence(): + """Test that agent-level tools persist across multiple runs with OpenAI Responses Client.""" + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that uses available tools.", + tools=[get_weather], # Agent-level tool + ) as agent: + # First run - agent-level tool should be available + first_response = await agent.run("What's the weather like in Chicago?") + + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + # Should use the agent-level weather tool + assert 
any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) + + # Second run - agent-level tool should still be available (persistence test) + second_response = await agent.run("What's the weather in Miami?") + + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + # Should use the agent-level weather tool again + assert any(term in second_response.text.lower() for term in ["miami", "sunny", "72"]) + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_run_level_tool_isolation(): + """Test that run-level tools are isolated to specific runs and don't persist with OpenAI Responses Client.""" + # Counter to track how many times the weather tool is called + call_count = 0 + + @ai_function + async def get_weather_with_counter( + location: Annotated[str, "The location as a city name"], + ) -> str: + """Get the current weather in a given location.""" + nonlocal call_count + call_count += 1 + return f"The weather in {location} is sunny and 72°F." + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + ) as agent: + # First run - use run-level tool + first_response = await agent.run( + "What's the weather like in Chicago?", + tools=[get_weather_with_counter], # Run-level tool + ) + + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + # Should use the run-level weather tool (call count should be 1) + assert call_count == 1 + assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) + + # Second run - run-level tool should NOT persist (key isolation test) + second_response = await agent.run("What's the weather like in Miami?") + + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + # Should NOT use the weather tool since it was only run-level in previous call + # Call count should still be 1 (no additional calls) + assert call_count == 1 + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_chat_options_agent_level() -> None: + """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + tools=[get_weather], + default_options={ + "max_tokens": 100, + "temperature": 0.7, + "top_p": 0.9, + "seed": 123, + "user": "comprehensive-test-user", + "tool_choice": "auto", + }, + ) as agent: + response = await agent.run( + "Provide a brief, helpful response.", + ) + + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_hosted_mcp_tool() -> None: + """Integration test for HostedMCPTool with OpenAI Response Agent using Microsoft Learn MCP.""" + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can help with microsoft documentation questions.", + tools=HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + description="A Microsoft Learn MCP server for documentation questions", + approval_mode="never_require", + ), + ) as agent: + response = await agent.run( + "How to create an Azure storage account using az cli?", + # this needs to be high enough to 
handle the full MCP tool response. + options={"max_tokens": 5000}, + ) + + assert isinstance(response, AgentRunResponse) + assert response.text + # Should contain Azure-related content since it's asking about Azure CLI + assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_local_mcp_tool() -> None: + """Integration test for MCPStreamableHTTPTool with OpenAI Response Agent using Microsoft Learn MCP.""" + + mcp_tool = MCPStreamableHTTPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + ) + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can help with microsoft documentation questions.", + tools=[mcp_tool], + ) as agent: + response = await agent.run( + "How to create an Azure storage account using az cli?", + options={"max_tokens": 200}, + ) + + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + # Should contain Azure-related content since it's asking about Azure CLI + assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) + + +class ReleaseBrief(BaseModel): + """Structured output model for release brief testing.""" + + title: str + summary: str + highlights: list[str] + model_config = {"extra": "forbid"} + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_with_response_format_pydantic() -> None: + """Integration test for response_format with Pydantic model using OpenAI Responses Client.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that returns structured JSON responses.", + ) as agent: + response = await agent.run( + "Summarize the following release notes into a ReleaseBrief:\n\n" + "Version 2.0 Release Notes:\n" + "- Added new streaming API for real-time responses\n" + "- Improved error handling with detailed messages\n" + "- Performance boost of 50% in batch processing\n" + "- Fixed memory leak in connection pooling", + options={ + "response_format": ReleaseBrief, + }, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.value is not None + assert isinstance(response.value, ReleaseBrief) + + # Validate structured output fields + brief = response.value + assert len(brief.title) > 0 + assert len(brief.summary) > 0 + assert len(brief.highlights) > 0 + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_with_runtime_json_schema() -> None: + """Integration test for response_format with runtime JSON schema using OpenAI Responses Client.""" + runtime_schema = { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + } + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="Return only JSON that matches the provided schema. 
Do not add commentary.", + ) as agent: + response = await agent.run( + "Give a brief weather digest for Seattle.", + options={ + "response_format": { + "type": "json_schema", + "json_schema": { + "name": runtime_schema["title"], + "strict": True, + "schema": runtime_schema, + }, + }, + }, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.text is not None + + # Parse JSON and validate structure + parsed = json.loads(response.text) + assert "location" in parsed + assert "conditions" in parsed + assert "temperature_c" in parsed + assert "advisory" in parsed diff --git a/python/packages/core/tests/core/test_clients.py b/python/packages/core/tests/core/test_clients.py index 423a7e42b5..67ecd54a8d 100644 --- a/python/packages/core/tests/core/test_clients.py +++ b/python/packages/core/tests/core/test_clients.py @@ -7,7 +7,6 @@ BaseChatClient, ChatClientProtocol, ChatMessage, - ChatOptions, Role, ) @@ -50,12 +49,22 @@ async def test_chat_client_instructions_handling(chat_client_base: ChatClientPro chat_client_base, "_inner_get_response", ) as mock_inner_get_response: - await chat_client_base.get_response("hello", chat_options=ChatOptions(instructions=instructions)) + await chat_client_base.get_response("hello", options={"instructions": instructions}) mock_inner_get_response.assert_called_once() _, kwargs = mock_inner_get_response.call_args messages = kwargs.get("messages", []) - assert len(messages) == 2 - assert messages[0].role == Role.SYSTEM - assert messages[0].text == instructions - assert messages[1].role == Role.USER - assert messages[1].text == "hello" + assert len(messages) == 1 + assert messages[0].role == Role.USER + assert messages[0].text == "hello" + + from agent_framework._types import prepend_instructions_to_messages + + appended_messages = prepend_instructions_to_messages( + [ChatMessage(role=Role.USER, text="hello")], + instructions, + ) + assert len(appended_messages) == 2 + assert appended_messages[0].role == Role.SYSTEM + assert appended_messages[0].text == "You are a helpful assistant." + assert appended_messages[1].role == Role.USER + assert appended_messages[1].text == "hello" diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py index bc96ddcc35..3aa8586a69 100644 --- a/python/packages/core/tests/core/test_function_invocation_logic.py +++ b/python/packages/core/tests/core/test_function_invocation_logic.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
from collections.abc import Awaitable, Callable +from typing import Any import pytest @@ -8,7 +9,6 @@ ChatAgent, ChatClientProtocol, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, FunctionApprovalRequestContent, @@ -39,7 +39,7 @@ def ai_func(arg1: str) -> str: ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[ai_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]}) assert exec_counter == 1 assert len(response.messages) == 3 assert response.messages[0].role == Role.ASSISTANT @@ -79,7 +79,7 @@ def ai_func(arg1: str) -> str: ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[ai_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]}) assert exec_counter == 2 assert len(response.messages) == 5 assert response.messages[0].role == Role.ASSISTANT @@ -121,7 +121,9 @@ def ai_func(arg1: str) -> str: ], ] updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[ai_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [ai_func]} + ): updates.append(update) assert len(updates) == 4 # two updates with the function call, the function result and the final text assert updates[0].contents[0].call_id == "1" @@ -371,18 +373,18 @@ def func_with_approval(arg1: str) -> str: ] # Execute the test - chat_options = ChatOptions(tool_choice="auto", tools=tools) + options: dict[str, Any] = {"tool_choice": "auto", "tools": tools} if thread_type == "service": - # For service threads, we need to pass conversation_id via ChatOptions - chat_options.store = True - chat_options.conversation_id = conversation_id + # For service threads, we need to pass conversation_id via options + options["store"] = True + options["conversation_id"] = conversation_id if not streaming: - response = await chat_client_base.get_response("hello", chat_options=chat_options) + response = await chat_client_base.get_response("hello", options=options) messages = response.messages else: updates = [] - async for update in chat_client_base.get_streaming_response("hello", chat_options=chat_options): + async for update in chat_client_base.get_streaming_response("hello", options=options): updates.append(update) messages = updates @@ -492,7 +494,9 @@ def func_rejected(arg1: str) -> str: ] # Get the response with approval requests - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[func_approved, func_rejected]) + response = await chat_client_base.get_response( + "hello", options={"tool_choice": "auto", "tools": [func_approved, func_rejected]} + ) # Approval requests are now added to the assistant message, not a separate message assert len(response.messages) == 1 # Assistant message should have: 2 FunctionCallContent + 2 FunctionApprovalRequestContent @@ -519,7 +523,9 @@ def func_rejected(arg1: str) -> str: all_messages = response.messages + [ChatMessage(role="user", contents=[approved_response, rejected_response])] # Call get_response which will process the approvals - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[func_approved, func_rejected]) + await chat_client_base.get_response( + all_messages, options={"tool_choice": "auto", 
"tools": [func_approved, func_rejected]} + ) # Verify the approval/rejection was processed correctly # Find the results in the input messages (modified in-place) @@ -574,7 +580,9 @@ def func_with_approval(arg1: str) -> str: ), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[func_with_approval]) + response = await chat_client_base.get_response( + "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} + ) # Should have one assistant message containing both the call and approval request assert len(response.messages) == 1 @@ -610,7 +618,9 @@ def func_with_approval(arg1: str) -> str: ] # Get approval request - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[func_with_approval]) + response1 = await chat_client_base.get_response( + "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} + ) # Store messages (like a thread would) persisted_messages = [ @@ -628,7 +638,9 @@ def func_with_approval(arg1: str) -> str: persisted_messages.append(ChatMessage(role="user", contents=[approval_response])) # Continue with all persisted messages - response2 = await chat_client_base.get_response(persisted_messages, tool_choice="auto", tools=[func_with_approval]) + response2 = await chat_client_base.get_response( + persisted_messages, options={"tool_choice": "auto", "tools": [func_with_approval]} + ) # Should execute successfully assert response2 is not None @@ -656,7 +668,9 @@ def func_with_approval(arg1: str) -> str: ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[func_with_approval]) + response1 = await chat_client_base.get_response( + "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} + ) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] approval_response = FunctionApprovalResponseContent( @@ -666,7 +680,7 @@ def func_with_approval(arg1: str) -> str: ) all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[func_with_approval]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [func_with_approval]}) # Count function calls with the same call_id function_call_count = sum( @@ -699,7 +713,9 @@ def func_with_approval(arg1: str) -> str: ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[func_with_approval]) + response1 = await chat_client_base.get_response( + "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} + ) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] rejection_response = FunctionApprovalResponseContent( @@ -709,7 +725,7 @@ def func_with_approval(arg1: str) -> str: ) all_messages = response1.messages + [ChatMessage(role="user", contents=[rejection_response])] - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[func_with_approval]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [func_with_approval]}) # Find the rejection result rejection_result = next( @@ -753,7 +769,7 @@ def ai_func(arg1: str) -> str: # Set max_iterations to 1 in additional_properties 
chat_client_base.function_invocation_configuration.max_iterations = 1 - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[ai_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]}) # With max_iterations=1, we should: # 1. Execute first function call (exec_counter=1) @@ -780,7 +796,7 @@ def ai_func(arg1: str) -> str: # Disable function invocation chat_client_base.function_invocation_configuration.enabled = False - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[ai_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]}) # Function should not be executed - when enabled=False, the loop doesn't run assert exec_counter == 0 @@ -827,7 +843,7 @@ def error_func(arg1: str) -> str: # Set max_consecutive_errors to 2 chat_client_base.function_invocation_configuration.max_consecutive_errors_per_request = 2 - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[error_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [error_func]}) # Should stop after 2 consecutive errors and force a non-tool response error_results = [ @@ -870,7 +886,7 @@ def known_func(arg1: str) -> str: # Set terminate_on_unknown_calls to False (default) chat_client_base.function_invocation_configuration.terminate_on_unknown_calls = False - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[known_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [known_func]}) # Should have a result message indicating the tool wasn't found assert len(response.messages) == 3 @@ -904,7 +920,7 @@ def known_func(arg1: str) -> str: # Should raise an exception when encountering an unknown function with pytest.raises(KeyError, match='Error: Requested function "unknown_function" not found'): - await chat_client_base.get_response("hello", tool_choice="auto", tools=[known_func]) + await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [known_func]}) assert exec_counter == 0 @@ -940,7 +956,7 @@ def hidden_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.additional_tools = [hidden_func] # Only pass visible_func in the tools parameter - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[visible_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [visible_func]}) # Additional tools are treated as declaration_only, so not executed # The function call should be in the messages but not executed @@ -976,7 +992,7 @@ def error_func(arg1: str) -> str: # Set include_detailed_errors to False (default) chat_client_base.function_invocation_configuration.include_detailed_errors = False - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[error_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [error_func]}) # Should have a generic error message error_result = next( @@ -1008,7 +1024,7 @@ def error_func(arg1: str) -> str: # Set include_detailed_errors to True chat_client_base.function_invocation_configuration.include_detailed_errors = True - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[error_func]) + response = await chat_client_base.get_response("hello", 
options={"tool_choice": "auto", "tools": [error_func]}) # Should have detailed error message error_result = next( @@ -1076,7 +1092,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Set include_detailed_errors to True chat_client_base.function_invocation_configuration.include_detailed_errors = True - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[typed_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [typed_func]}) # Should have detailed validation error error_result = next( @@ -1108,7 +1124,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Set include_detailed_errors to False (default) chat_client_base.function_invocation_configuration.include_detailed_errors = False - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[typed_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [typed_func]}) # Should have generic validation error error_result = next( @@ -1175,7 +1191,7 @@ def test_func(arg1: str) -> str: ] # Get approval request - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[test_func]) + response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]}) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] @@ -1190,7 +1206,7 @@ def test_func(arg1: str) -> str: all_messages = response1.messages + [ChatMessage(role="user", contents=[rejection_response])] # This should handle the rejection gracefully (not raise ToolException to user) - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[test_func]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [test_func]}) # Should have a rejection result rejection_result = next( @@ -1235,7 +1251,7 @@ def error_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.include_detailed_errors = False # Get approval request - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[error_func]) + response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [error_func]}) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] @@ -1249,7 +1265,7 @@ def error_func(arg1: str) -> str: all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] # Execute the approved function (which will error) - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[error_func]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [error_func]}) # Should have executed the function assert exec_counter == 1 @@ -1299,7 +1315,7 @@ def error_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.include_detailed_errors = True # Get approval request - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[error_func]) + response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [error_func]}) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] @@ -1313,7 +1329,7 @@ def error_func(arg1: str) -> str: all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] # Execute the 
approved function (which will error) - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[error_func]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [error_func]}) # Should have executed the function assert exec_counter == 1 @@ -1361,7 +1377,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.function_invocation_configuration.include_detailed_errors = True # Get approval request - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[typed_func]) + response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [typed_func]}) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] @@ -1375,7 +1391,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] # Execute the approved function (which will fail validation) - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[typed_func]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [typed_func]}) # Should NOT have executed the function (validation failed before execution) assert exec_counter == 0 @@ -1418,7 +1434,7 @@ def success_func(arg1: str) -> str: ] # Get approval request - response1 = await chat_client_base.get_response("hello", tool_choice="auto", tools=[success_func]) + response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [success_func]}) approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] @@ -1432,7 +1448,7 @@ def success_func(arg1: str) -> str: all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] # Execute the approved function - await chat_client_base.get_response(all_messages, tool_choice="auto", tools=[success_func]) + await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [success_func]}) # Should have executed successfully assert exec_counter == 1 @@ -1476,7 +1492,9 @@ async def test_declaration_only_tool(chat_client_base: ChatClientProtocol): ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[declaration_func]) + response = await chat_client_base.get_response( + "hello", options={"tool_choice": "auto", "tools": [declaration_func]} + ) # Should have the function call in messages but not a result function_calls = [ @@ -1530,7 +1548,7 @@ async def func2(arg1: str) -> str: ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[func1, func2]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [func1, func2]}) # Both functions should have been executed assert "func1_start" in exec_order @@ -1566,7 +1584,7 @@ def plain_function(arg1: str) -> str: ] # Pass plain function (will be auto-converted) - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[plain_function]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [plain_function]}) # Function should be executed assert exec_counter == 1 @@ -1598,7 +1616,7 @@ def test_func(arg1: str) -> str: ), ] - response 
= await chat_client_base.get_response("hello", tool_choice="auto", tools=[test_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]}) # Should have executed the function results = [ @@ -1625,7 +1643,7 @@ def test_func(arg1: str) -> str: ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[test_func]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]}) # Should have messages with both function call and function result assert len(response.messages) >= 2 @@ -1667,7 +1685,7 @@ def sometimes_fails(arg1: str) -> str: ChatResponse(messages=ChatMessage(role="assistant", text="done")), ] - response = await chat_client_base.get_response("hello", tool_choice="auto", tools=[sometimes_fails]) + response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [sometimes_fails]}) # Should have both an error and a success error_results = [ @@ -1714,7 +1732,7 @@ def func_with_approval(arg1: str) -> str: # Get the streaming response with approval request updates = [] async for update in chat_client_base.get_streaming_response( - "hello", tool_choice="auto", tools=[func_with_approval] + "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} ): updates.append(update) @@ -1770,7 +1788,9 @@ def ai_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.max_iterations = 1 updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[ai_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [ai_func]} + ): updates.append(update) # With max_iterations=1, we should only execute first function @@ -1798,7 +1818,9 @@ def ai_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.enabled = False updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[ai_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [ai_func]} + ): updates.append(update) # Function should not be executed - when enabled=False, the loop doesn't run @@ -1841,7 +1863,9 @@ def error_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.max_consecutive_errors_per_request = 2 updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[error_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [error_func]} + ): updates.append(update) # Should stop after 2 consecutive errors @@ -1887,7 +1911,9 @@ def known_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.terminate_on_unknown_calls = False updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[known_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [known_func]} + ): updates.append(update) # Should have a result message indicating the tool wasn't found @@ -1926,7 +1952,9 @@ def known_func(arg1: str) -> str: # Should raise an exception when encountering an unknown function with pytest.raises(KeyError, match='Error: Requested function "unknown_function" not found'): - 
async for _ in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[known_func]): + async for _ in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [known_func]} + ): pass assert exec_counter == 0 @@ -1953,7 +1981,9 @@ def error_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.include_detailed_errors = True updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[error_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [error_func]} + ): updates.append(update) # Should have detailed error message @@ -1989,7 +2019,9 @@ def error_func(arg1: str) -> str: chat_client_base.function_invocation_configuration.include_detailed_errors = False updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[error_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [error_func]} + ): updates.append(update) # Should have a generic error message @@ -2023,7 +2055,9 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.function_invocation_configuration.include_detailed_errors = True updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[typed_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [typed_func]} + ): updates.append(update) # Should have detailed validation error @@ -2057,7 +2091,9 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.function_invocation_configuration.include_detailed_errors = False updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[typed_func]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [typed_func]} + ): updates.append(update) # Should have generic validation error @@ -2105,7 +2141,9 @@ async def func2(arg1: str) -> str: ] updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[func1, func2]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [func1, func2]} + ): updates.append(update) # Both functions should have been executed @@ -2144,7 +2182,7 @@ def func_with_approval(arg1: str) -> str: updates = [] async for update in chat_client_base.get_streaming_response( - "hello", tool_choice="auto", tools=[func_with_approval] + "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} ): updates.append(update) @@ -2189,7 +2227,9 @@ def sometimes_fails(arg1: str) -> str: ] updates = [] - async for update in chat_client_base.get_streaming_response("hello", tool_choice="auto", tools=[sometimes_fails]): + async for update in chat_client_base.get_streaming_response( + "hello", options={"tool_choice": "auto", "tools": [sometimes_fails]} + ): updates.append(update) # Should have both an error and a success @@ -2246,8 +2286,7 @@ def ai_func(arg1: str) -> str: response = await chat_client_base.get_response( "hello", - tool_choice="auto", - tools=[ai_func], + options={"tool_choice": "auto", "tools": [ai_func]}, middleware=[TerminateLoopMiddleware()], ) @@ -2314,8 +2353,7 @@ def terminating_func(arg1: str) -> str: response = await 
chat_client_base.get_response( "hello", - tool_choice="auto", - tools=[normal_func, terminating_func], + options={"tool_choice": "auto", "tools": [normal_func, terminating_func]}, middleware=[SelectiveTerminateMiddleware()], ) @@ -2366,8 +2404,7 @@ def ai_func(arg1: str) -> str: updates = [] async for update in chat_client_base.get_streaming_response( "hello", - tool_choice="auto", - tools=[ai_func], + options={"tool_choice": "auto", "tools": [ai_func]}, middleware=[TerminateLoopMiddleware()], ): updates.append(update) diff --git a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py new file mode 100644 index 0000000000..fc6acb435d --- /dev/null +++ b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py @@ -0,0 +1,218 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Tests for kwargs propagation from get_response() to @ai_function tools.""" + +from typing import Any + +from agent_framework import ( + ChatMessage, + ChatResponse, + ChatResponseUpdate, + FunctionCallContent, + TextContent, + ai_function, +) +from agent_framework._tools import _handle_function_calls_response, _handle_function_calls_streaming_response + + +class TestKwargsPropagationToAIFunction: + """Test cases for kwargs flowing from get_response() to @ai_function tools.""" + + async def test_kwargs_propagate_to_ai_function_with_kwargs(self) -> None: + """Test that kwargs passed to get_response() are available in @ai_function **kwargs.""" + captured_kwargs: dict[str, Any] = {} + + @ai_function + def capture_kwargs_tool(x: int, **kwargs: Any) -> str: + """A tool that captures kwargs for testing.""" + captured_kwargs.update(kwargs) + return f"result: x={x}" + + # Create a mock client + mock_client = type("MockClient", (), {})() + + call_count = [0] + + async def mock_get_response(self, messages, **kwargs): + call_count[0] += 1 + if call_count[0] == 1: + # First call: return a function call + return ChatResponse( + messages=[ + ChatMessage( + role="assistant", + contents=[ + FunctionCallContent(call_id="call_1", name="capture_kwargs_tool", arguments='{"x": 42}') + ], + ) + ] + ) + # Second call: return final response + return ChatResponse(messages=[ChatMessage(role="assistant", text="Done!")]) + + # Wrap the function with function invocation decorator + wrapped = _handle_function_calls_response(mock_get_response) + + # Call with custom kwargs that should propagate to the tool + # Note: tools are passed in options dict, custom kwargs are passed separately + result = await wrapped( + mock_client, + messages=[], + options={"tools": [capture_kwargs_tool]}, + user_id="user-123", + session_token="secret-token", + custom_data={"key": "value"}, + ) + + # Verify the tool was called and received the kwargs + assert "user_id" in captured_kwargs, f"Expected 'user_id' in captured kwargs: {captured_kwargs}" + assert captured_kwargs["user_id"] == "user-123" + assert "session_token" in captured_kwargs + assert captured_kwargs["session_token"] == "secret-token" + assert "custom_data" in captured_kwargs + assert captured_kwargs["custom_data"] == {"key": "value"} + # Verify result + assert result.messages[-1].text == "Done!" 
+ + async def test_kwargs_not_forwarded_to_ai_function_without_kwargs(self) -> None: + """Test that kwargs are NOT forwarded to @ai_function that doesn't accept **kwargs.""" + + @ai_function + def simple_tool(x: int) -> str: + """A simple tool without **kwargs.""" + # This should not receive any extra kwargs + return f"result: x={x}" + + mock_client = type("MockClient", (), {})() + + call_count = [0] + + async def mock_get_response(self, messages, **kwargs): + call_count[0] += 1 + if call_count[0] == 1: + return ChatResponse( + messages=[ + ChatMessage( + role="assistant", + contents=[FunctionCallContent(call_id="call_1", name="simple_tool", arguments='{"x": 99}')], + ) + ] + ) + return ChatResponse(messages=[ChatMessage(role="assistant", text="Completed!")]) + + wrapped = _handle_function_calls_response(mock_get_response) + + # Call with kwargs - the tool should work but not receive them + result = await wrapped( + mock_client, + messages=[], + options={"tools": [simple_tool]}, + user_id="user-123", # This kwarg should be ignored by the tool + ) + + # Verify the tool was called successfully (no error from extra kwargs) + assert result.messages[-1].text == "Completed!" + + async def test_kwargs_isolated_between_function_calls(self) -> None: + """Test that kwargs don't leak between different function call invocations.""" + invocation_kwargs: list[dict[str, Any]] = [] + + @ai_function + def tracking_tool(name: str, **kwargs: Any) -> str: + """A tool that tracks kwargs from each invocation.""" + invocation_kwargs.append(dict(kwargs)) + return f"called with {name}" + + mock_client = type("MockClient", (), {})() + + call_count = [0] + + async def mock_get_response(self, messages, **kwargs): + call_count[0] += 1 + if call_count[0] == 1: + # Two function calls in one response + return ChatResponse( + messages=[ + ChatMessage( + role="assistant", + contents=[ + FunctionCallContent( + call_id="call_1", name="tracking_tool", arguments='{"name": "first"}' + ), + FunctionCallContent( + call_id="call_2", name="tracking_tool", arguments='{"name": "second"}' + ), + ], + ) + ] + ) + return ChatResponse(messages=[ChatMessage(role="assistant", text="All done!")]) + + wrapped = _handle_function_calls_response(mock_get_response) + + # Call with kwargs + result = await wrapped( + mock_client, + messages=[], + options={"tools": [tracking_tool]}, + request_id="req-001", + trace_context={"trace_id": "abc"}, + ) + + # Both invocations should have received the same kwargs + assert len(invocation_kwargs) == 2 + for kwargs in invocation_kwargs: + assert kwargs.get("request_id") == "req-001" + assert kwargs.get("trace_context") == {"trace_id": "abc"} + assert result.messages[-1].text == "All done!" 
+ + async def test_streaming_response_kwargs_propagation(self) -> None: + """Test that kwargs propagate to @ai_function in streaming mode.""" + captured_kwargs: dict[str, Any] = {} + + @ai_function + def streaming_capture_tool(value: str, **kwargs: Any) -> str: + """A tool that captures kwargs during streaming.""" + captured_kwargs.update(kwargs) + return f"processed: {value}" + + mock_client = type("MockClient", (), {})() + + call_count = [0] + + async def mock_get_streaming_response(self, messages, **kwargs): + call_count[0] += 1 + if call_count[0] == 1: + # First call: return function call update + yield ChatResponseUpdate( + role="assistant", + contents=[ + FunctionCallContent( + call_id="stream_call_1", + name="streaming_capture_tool", + arguments='{"value": "streaming-test"}', + ) + ], + is_finished=True, + ) + else: + # Second call: return final response + yield ChatResponseUpdate(text=TextContent(text="Stream complete!"), role="assistant", is_finished=True) + + wrapped = _handle_function_calls_streaming_response(mock_get_streaming_response) + + # Collect streaming updates + updates: list[ChatResponseUpdate] = [] + async for update in wrapped( + mock_client, + messages=[], + options={"tools": [streaming_capture_tool]}, + streaming_session="session-xyz", + correlation_id="corr-123", + ): + updates.append(update) + + # Verify kwargs were captured by the tool + assert "streaming_session" in captured_kwargs, f"Expected 'streaming_session' in {captured_kwargs}" + assert captured_kwargs["streaming_session"] == "session-xyz" + assert captured_kwargs["correlation_id"] == "corr-123" diff --git a/python/packages/core/tests/core/test_middleware.py b/python/packages/core/tests/core/test_middleware.py index 552d3a3c48..2ec11e9be9 100644 --- a/python/packages/core/tests/core/test_middleware.py +++ b/python/packages/core/tests/core/test_middleware.py @@ -29,7 +29,6 @@ FunctionMiddlewarePipeline, ) from agent_framework._tools import AIFunction -from agent_framework._types import ChatOptions class TestAgentRunContext: @@ -100,12 +99,12 @@ class TestChatContext: def test_init_with_defaults(self, mock_chat_client: Any) -> None: """Test ChatContext initialization with default values.""" messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) assert context.chat_client is mock_chat_client assert context.messages == messages - assert context.chat_options is chat_options + assert context.options is chat_options assert context.is_streaming is False assert context.metadata == {} assert context.result is None @@ -114,13 +113,13 @@ def test_init_with_defaults(self, mock_chat_client: Any) -> None: def test_init_with_custom_values(self, mock_chat_client: Any) -> None: """Test ChatContext initialization with custom values.""" messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions(temperature=0.5) + chat_options: dict[str, Any] = {"temperature": 0.5} metadata = {"key": "value"} context = ChatContext( chat_client=mock_chat_client, messages=messages, - chat_options=chat_options, + options=chat_options, is_streaming=True, metadata=metadata, terminate=True, @@ -128,7 +127,7 @@ def test_init_with_custom_values(self, mock_chat_client: Any) -> None: assert context.chat_client is mock_chat_client assert context.messages == messages - 
assert context.chat_options is chat_options + assert context.options is chat_options assert context.is_streaming is True assert context.metadata == metadata assert context.terminate is True @@ -562,8 +561,8 @@ async def test_execute_no_middleware(self, mock_chat_client: Any) -> None: """Test pipeline execution with no middleware.""" pipeline = ChatMiddlewarePipeline() messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) expected_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")]) @@ -589,8 +588,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = OrderTrackingChatMiddleware("test") pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) expected_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")]) @@ -606,8 +605,8 @@ async def test_execute_stream_no_middleware(self, mock_chat_client: Any) -> None """Test pipeline streaming execution with no middleware.""" pipeline = ChatMiddlewarePipeline() messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[TextContent(text="chunk1")]) @@ -637,10 +636,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = StreamOrderTrackingChatMiddleware("test") pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext( - chat_client=mock_chat_client, messages=messages, chat_options=chat_options, is_streaming=True - ) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True) async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: execution_order.append("handler_start") @@ -662,8 +659,8 @@ async def test_execute_with_pre_next_termination(self, mock_chat_client: Any) -> middleware = self.PreNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> ChatResponse: @@ -682,8 +679,8 @@ async def test_execute_with_post_next_termination(self, mock_chat_client: Any) - middleware = self.PostNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = 
[ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> ChatResponse: @@ -702,10 +699,8 @@ async def test_execute_stream_with_pre_next_termination(self, mock_chat_client: middleware = self.PreNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext( - chat_client=mock_chat_client, messages=messages, chat_options=chat_options, is_streaming=True - ) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: @@ -729,10 +724,8 @@ async def test_execute_stream_with_post_next_termination(self, mock_chat_client: middleware = self.PostNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext( - chat_client=mock_chat_client, messages=messages, chat_options=chat_options, is_streaming=True - ) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: @@ -962,8 +955,8 @@ async def function_chat_middleware( pipeline = ChatMiddlewarePipeline([ClassChatMiddleware(), function_chat_middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") @@ -1093,8 +1086,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = [FirstChatMiddleware(), SecondChatMiddleware(), ThirdChatMiddleware()] pipeline = ChatMiddlewarePipeline(middleware) # type: ignore messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") @@ -1203,7 +1196,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai # Verify context has all expected attributes assert hasattr(context, "chat_client") assert hasattr(context, "messages") - assert hasattr(context, "chat_options") + assert hasattr(context, "options") assert hasattr(context, "is_streaming") assert hasattr(context, "metadata") assert hasattr(context, "result") @@ -1216,8 +1209,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai assert context.messages[0].text == "test" assert context.is_streaming is False assert 
isinstance(context.metadata, dict) - assert isinstance(context.chat_options, ChatOptions) - assert context.chat_options.temperature == 0.5 + assert isinstance(context.options, dict) + assert context.options.get("temperature") == 0.5 # Add custom metadata context.metadata["validated"] = True @@ -1227,8 +1220,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = ChatContextValidationMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions(temperature=0.5) - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {"temperature": 0.5} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: # Verify metadata was set by middleware @@ -1331,10 +1324,10 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = ChatStreamingFlagMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() + chat_options: dict[str, Any] = {} # Test non-streaming - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: streaming_flags.append(ctx.is_streaming) @@ -1344,7 +1337,7 @@ async def final_handler(ctx: ChatContext) -> ChatResponse: # Test streaming context_stream = ChatContext( - chat_client=mock_chat_client, messages=messages, chat_options=chat_options, is_streaming=True + chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True ) async def final_stream_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: @@ -1373,10 +1366,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = ChatStreamProcessingMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext( - chat_client=mock_chat_client, messages=messages, chat_options=chat_options, is_streaming=True - ) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True) async def final_stream_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: chunks_processed.append("stream_start") @@ -1590,8 +1581,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = NoNextChatMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) handler_called = False @@ -1618,10 +1609,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai middleware = NoNextStreamingChatMiddleware() pipeline = ChatMiddlewarePipeline([middleware]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext( - chat_client=mock_chat_client, messages=messages, 
chat_options=chat_options, is_streaming=True - ) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True) handler_called = False @@ -1656,8 +1645,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai pipeline = ChatMiddlewarePipeline([FirstChatMiddleware(), SecondChatMiddleware()]) messages = [ChatMessage(role=Role.USER, text="test")] - chat_options = ChatOptions() - context = ChatContext(chat_client=mock_chat_client, messages=messages, chat_options=chat_options) + chat_options: dict[str, Any] = {} + context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) handler_called = False diff --git a/python/packages/core/tests/core/test_middleware_with_agent.py b/python/packages/core/tests/core/test_middleware_with_agent.py index 147c474800..38625db168 100644 --- a/python/packages/core/tests/core/test_middleware_with_agent.py +++ b/python/packages/core/tests/core/test_middleware_with_agent.py @@ -734,7 +734,7 @@ async def process( async def test_function_middleware_can_access_and_override_custom_kwargs( self, chat_client: "MockChatClient" ) -> None: - """Test that function middleware can access and override custom parameters like temperature.""" + """Test that function middleware can access and override custom parameters.""" captured_kwargs: dict[str, Any] = {} modified_kwargs: dict[str, Any] = {} middleware_called = False @@ -747,38 +747,20 @@ async def kwargs_middleware( middleware_called = True # Capture the original kwargs - captured_kwargs["has_chat_options"] = "chat_options" in context.kwargs captured_kwargs["has_custom_param"] = "custom_param" in context.kwargs captured_kwargs["custom_param"] = context.kwargs.get("custom_param") - # Capture original chat_options values if present - if "chat_options" in context.kwargs: - chat_options = context.kwargs["chat_options"] - captured_kwargs["original_temperature"] = getattr(chat_options, "temperature", None) - captured_kwargs["original_max_tokens"] = getattr(chat_options, "max_tokens", None) - # Modify some kwargs context.kwargs["temperature"] = 0.9 context.kwargs["max_tokens"] = 500 context.kwargs["new_param"] = "added_by_middleware" - # Also modify chat_options if present - if "chat_options" in context.kwargs: - context.kwargs["chat_options"].temperature = 0.9 - context.kwargs["chat_options"].max_tokens = 500 - # Store modified kwargs for verification modified_kwargs["temperature"] = context.kwargs.get("temperature") modified_kwargs["max_tokens"] = context.kwargs.get("max_tokens") modified_kwargs["new_param"] = context.kwargs.get("new_param") modified_kwargs["custom_param"] = context.kwargs.get("custom_param") - # Capture modified chat_options values if present - if "chat_options" in context.kwargs: - chat_options = context.kwargs["chat_options"] - modified_kwargs["chat_options_temperature"] = getattr(chat_options, "temperature", None) - modified_kwargs["chat_options_max_tokens"] = getattr(chat_options, "max_tokens", None) - await next(context) chat_client.responses = [ @@ -800,9 +782,9 @@ async def kwargs_middleware( # Create ChatAgent with function middleware agent = ChatAgent(chat_client=chat_client, middleware=[kwargs_middleware], tools=[sample_tool_function]) - # Execute the agent with custom parameters + # Execute the agent with custom parameters passed as kwargs messages = [ChatMessage(role=Role.USER, text="test message")] - response = await agent.run(messages, temperature=0.7, 
max_tokens=100, custom_param="test_value") + response = await agent.run(messages, custom_param="test_value") # Verify response assert response is not None @@ -812,19 +794,14 @@ async def kwargs_middleware( assert middleware_called, "Function middleware was not called" # Verify middleware captured the original kwargs - assert captured_kwargs["has_chat_options"] is True assert captured_kwargs["has_custom_param"] is True assert captured_kwargs["custom_param"] == "test_value" - assert captured_kwargs["original_temperature"] == 0.7 - assert captured_kwargs["original_max_tokens"] == 100 # Verify middleware could modify the kwargs assert modified_kwargs["temperature"] == 0.9 assert modified_kwargs["max_tokens"] == 500 assert modified_kwargs["new_param"] == "added_by_middleware" assert modified_kwargs["custom_param"] == "test_value" - assert modified_kwargs["chat_options_temperature"] == 0.9 - assert modified_kwargs["chat_options_max_tokens"] == 500 class TestMiddlewareDynamicRebuild: diff --git a/python/packages/core/tests/core/test_middleware_with_chat.py b/python/packages/core/tests/core/test_middleware_with_chat.py index 91c501de6b..9d395284ea 100644 --- a/python/packages/core/tests/core/test_middleware_with_chat.py +++ b/python/packages/core/tests/core/test_middleware_with_chat.py @@ -366,7 +366,7 @@ def sample_tool(location: str) -> str: # Execute the chat client directly with tools - this should trigger function invocation and middleware messages = [ChatMessage(role=Role.USER, text="What's the weather in San Francisco?")] - response = await chat_client.get_response(messages, tools=[sample_tool]) + response = await chat_client.get_response(messages, options={"tools": [sample_tool]}) # Verify response assert response is not None @@ -423,7 +423,7 @@ def sample_tool(location: str) -> str: # Execute the chat client directly with run-level middleware and tools messages = [ChatMessage(role=Role.USER, text="What's the weather in New York?")] response = await chat_client.get_response( - messages, tools=[sample_tool], middleware=[run_level_function_middleware] + messages, options={"tools": [sample_tool]}, middleware=[run_level_function_middleware] ) # Verify response diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index 38835fbc91..5b9704abb0 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -17,7 +17,6 @@ AgentThread, BaseChatClient, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, Role, @@ -215,7 +214,7 @@ def service_url(self): return "https://test.example.com" async def _inner_get_response( - self, *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ): return ChatResponse( messages=[ChatMessage(role=Role.ASSISTANT, text="Test response")], @@ -224,7 +223,7 @@ async def _inner_get_response( ) async def _inner_get_streaming_response( - self, *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ): yield ChatResponseUpdate(text="Hello", role=Role.ASSISTANT) yield ChatResponseUpdate(text=" world", role=Role.ASSISTANT) @@ -405,7 +404,7 @@ def __init__(self): self.id = "test_agent_id" self.name = "test_agent" self.description = "Test agent description" - self.chat_options = 
ChatOptions(model_id="TestModel") + self.default_options: dict[str, Any] = {"model_id": "TestModel"} async def run(self, messages=None, *, thread=None, **kwargs): return AgentRunResponse( diff --git a/python/packages/core/tests/core/test_tools.py b/python/packages/core/tests/core/test_tools.py index 73327b4c1f..77442be322 100644 --- a/python/packages/core/tests/core/test_tools.py +++ b/python/packages/core/tests/core/test_tools.py @@ -429,7 +429,7 @@ async def simple_tool(message: str) -> str: result = await simple_tool.invoke( arguments=args, api_token="secret-token", - chat_options={"model_id": "dummy"}, + options={"model_id": "dummy"}, ) assert result == "HELLO WORLD" @@ -1035,7 +1035,7 @@ async def mock_get_response(self, messages, **kwargs): wrapped = _handle_function_calls_response(mock_get_response) # Execute - result = await wrapped(mock_client, messages=[], tools=[no_approval_tool]) + result = await wrapped(mock_client, messages=[], options={"tools": [no_approval_tool]}) # Verify: should have 3 messages: function call, function result, final answer assert len(result.messages) == 3 @@ -1075,7 +1075,7 @@ async def mock_get_response(self, messages, **kwargs): wrapped = _handle_function_calls_response(mock_get_response) # Execute - result = await wrapped(mock_client, messages=[], tools=[requires_approval_tool]) + result = await wrapped(mock_client, messages=[], options={"tools": [requires_approval_tool]}) # Verify: should return 1 message with function call and approval request from agent_framework import FunctionApprovalRequestContent @@ -1121,7 +1121,7 @@ async def mock_get_response(self, messages, **kwargs): wrapped = _handle_function_calls_response(mock_get_response) # Execute - result = await wrapped(mock_client, messages=[], tools=[no_approval_tool]) + result = await wrapped(mock_client, messages=[], options={"tools": [no_approval_tool]}) # Verify: should have function calls, results, and final answer from agent_framework import FunctionResultContent @@ -1167,7 +1167,7 @@ async def mock_get_response(self, messages, **kwargs): wrapped = _handle_function_calls_response(mock_get_response) # Execute - result = await wrapped(mock_client, messages=[], tools=[requires_approval_tool]) + result = await wrapped(mock_client, messages=[], options={"tools": [requires_approval_tool]}) # Verify: should return 1 message with function calls and approval requests from agent_framework import FunctionApprovalRequestContent @@ -1213,7 +1213,7 @@ async def mock_get_response(self, messages, **kwargs): wrapped = _handle_function_calls_response(mock_get_response) # Execute - result = await wrapped(mock_client, messages=[], tools=[no_approval_tool, requires_approval_tool]) + result = await wrapped(mock_client, messages=[], options={"tools": [no_approval_tool, requires_approval_tool]}) # Verify: should return approval requests for both (when one needs approval, all are sent for approval) from agent_framework import FunctionApprovalRequestContent @@ -1253,7 +1253,7 @@ async def mock_get_streaming_response(self, messages, **kwargs): # Execute and collect updates updates = [] - async for update in wrapped(mock_client, messages=[], tools=[no_approval_tool]): + async for update in wrapped(mock_client, messages=[], options={"tools": [no_approval_tool]}): updates.append(update) # Verify: should have function call update, tool result update (injected), and final update @@ -1298,7 +1298,7 @@ async def mock_get_streaming_response(self, messages, **kwargs): # Execute and collect updates updates = [] - async for 
update in wrapped(mock_client, messages=[], tools=[requires_approval_tool]): + async for update in wrapped(mock_client, messages=[], options={"tools": [requires_approval_tool]}): updates.append(update) # Verify: should yield function call and then approval request @@ -1343,7 +1343,7 @@ async def mock_get_streaming_response(self, messages, **kwargs): # Execute and collect updates updates = [] - async for update in wrapped(mock_client, messages=[], tools=[no_approval_tool]): + async for update in wrapped(mock_client, messages=[], options={"tools": [no_approval_tool]}): updates.append(update) # Verify: should have both function calls, one tool result update with both results, and final message @@ -1392,7 +1392,7 @@ async def mock_get_streaming_response(self, messages, **kwargs): # Execute and collect updates updates = [] - async for update in wrapped(mock_client, messages=[], tools=[requires_approval_tool]): + async for update in wrapped(mock_client, messages=[], options={"tools": [requires_approval_tool]}): updates.append(update) # Verify: should yield both function calls and then approval requests @@ -1439,7 +1439,9 @@ async def mock_get_streaming_response(self, messages, **kwargs): # Execute and collect updates updates = [] - async for update in wrapped(mock_client, messages=[], tools=[no_approval_tool, requires_approval_tool]): + async for update in wrapped( + mock_client, messages=[], options={"tools": [no_approval_tool, requires_approval_tool]} + ): updates.append(update) # Verify: should yield both function calls and then approval requests (when one needs approval, all wait) diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index 6e6e5bfee7..5376f72754 100644 --- a/python/packages/core/tests/core/test_types.py +++ b/python/packages/core/tests/core/test_types.py @@ -43,6 +43,7 @@ UsageContent, UsageDetails, ai_function, + merge_chat_options, prepare_function_call_results, ) from agent_framework.exceptions import AdditionItemMismatch, ContentError @@ -866,117 +867,149 @@ async def gen() -> AsyncIterable[ChatResponseUpdate]: def test_chat_tool_mode(): """Test the ToolMode class to ensure it initializes correctly.""" # Create instances of ToolMode - auto_mode = ToolMode.AUTO - required_any = ToolMode.REQUIRED_ANY - required_mode = ToolMode.REQUIRED("example_function") - none_mode = ToolMode.NONE + auto_mode: ToolMode = {"mode": "auto"} + required_any: ToolMode = {"mode": "required"} + required_mode: ToolMode = {"mode": "required", "required_function_name": "example_function"} + none_mode: ToolMode = {"mode": "none"} # Check the type and content - assert auto_mode.mode == "auto" - assert auto_mode.required_function_name is None - assert required_any.mode == "required" - assert required_any.required_function_name is None - assert required_mode.mode == "required" - assert required_mode.required_function_name == "example_function" - assert none_mode.mode == "none" - assert none_mode.required_function_name is None - - # Ensure the instances are of type ToolMode - assert isinstance(auto_mode, ToolMode) - assert isinstance(required_any, ToolMode) - assert isinstance(required_mode, ToolMode) - assert isinstance(none_mode, ToolMode) - - assert ToolMode.REQUIRED("example_function") == ToolMode.REQUIRED("example_function") - # serializer returns just the mode - assert ToolMode.REQUIRED_ANY.serialize_model() == "required" + assert auto_mode["mode"] == "auto" + assert "required_function_name" not in auto_mode + assert required_any["mode"] == 
"required" + assert "required_function_name" not in required_any + assert required_mode["mode"] == "required" + assert required_mode["required_function_name"] == "example_function" + assert none_mode["mode"] == "none" + assert "required_function_name" not in none_mode + + # equality of dicts + assert {"mode": "required", "required_function_name": "example_function"} == { + "mode": "required", + "required_function_name": "example_function", + } def test_chat_tool_mode_from_dict(): """Test creating ToolMode from a dictionary.""" - mode_dict = {"mode": "required", "required_function_name": "example_function"} - mode = ToolMode(**mode_dict) + mode: ToolMode = {"mode": "required", "required_function_name": "example_function"} # Check the type and content - assert mode.mode == "required" - assert mode.required_function_name == "example_function" - - # Ensure the instance is of type ToolMode - assert isinstance(mode, ToolMode) + assert mode["mode"] == "required" + assert mode["required_function_name"] == "example_function" # region ChatOptions def test_chat_options_init() -> None: - options = ChatOptions() - assert options.model_id is None + """Test that ChatOptions can be created as a TypedDict.""" + options: ChatOptions = {} + assert options.get("model_id") is None + + # With values + options_with_model: ChatOptions = {"model_id": "gpt-4o", "temperature": 0.7} + assert options_with_model.get("model_id") == "gpt-4o" + assert options_with_model.get("temperature") == 0.7 + + +def test_chat_options_tool_choice_validation(): + """Test validate_tool_mode utility function.""" + from agent_framework._types import validate_tool_mode + + # Valid string values + assert validate_tool_mode("auto") == {"mode": "auto"} + assert validate_tool_mode("required") == {"mode": "required"} + assert validate_tool_mode("none") == {"mode": "none"} + + # Valid ToolMode dict values + assert validate_tool_mode({"mode": "auto"}) == {"mode": "auto"} + assert validate_tool_mode({"mode": "required"}) == {"mode": "required"} + assert validate_tool_mode({"mode": "required", "required_function_name": "example_function"}) == { + "mode": "required", + "required_function_name": "example_function", + } + assert validate_tool_mode({"mode": "none"}) == {"mode": "none"} + # None should return mode==none + assert validate_tool_mode(None) == {"mode": "none"} -def test_chat_options_tool_choice_validation_errors(): - with raises((ContentError, TypeError)): - ChatOptions(tool_choice="invalid-choice") + with raises(ContentError): + validate_tool_mode("invalid_mode") + with raises(ContentError): + validate_tool_mode({"mode": "invalid_mode"}) + with raises(ContentError): + validate_tool_mode({"mode": "auto", "required_function_name": "should_not_be_here"}) -def test_chat_options_and(ai_function_tool, ai_tool) -> None: - options1 = ChatOptions(model_id="gpt-4o", tools=[ai_function_tool], logit_bias={"x": 1}, metadata={"a": "b"}) - options2 = ChatOptions(model_id="gpt-4.1", tools=[ai_tool], additional_properties={"p": 1}) +def test_chat_options_merge(ai_function_tool, ai_tool) -> None: + """Test merge_chat_options utility function.""" + from agent_framework import merge_chat_options + + options1: ChatOptions = { + "model_id": "gpt-4o", + "tools": [ai_function_tool], + "logit_bias": {"x": 1}, + "metadata": {"a": "b"}, + } + options2: ChatOptions = {"model_id": "gpt-4.1", "tools": [ai_tool]} assert options1 != options2 - options3 = options1 & options2 - assert options3.model_id == "gpt-4.1" - assert options3.tools == [ai_function_tool, ai_tool] - 
assert options3.logit_bias == {"x": 1} - assert options3.metadata == {"a": "b"} - assert options3.additional_properties.get("p") == 1 + # Merge options - override takes precedence for non-collection fields + options3 = merge_chat_options(options1, options2) + + assert options3.get("model_id") == "gpt-4.1" + assert options3.get("tools") == [ai_function_tool, ai_tool] # tools are combined + assert options3.get("logit_bias") == {"x": 1} # base value preserved + assert options3.get("metadata") == {"a": "b"} # base value preserved def test_chat_options_and_tool_choice_override() -> None: """Test that tool_choice from other takes precedence in ChatOptions merge.""" # Agent-level defaults to "auto" - agent_options = ChatOptions(model_id="gpt-4o", tool_choice="auto") + agent_options: ChatOptions = {"model_id": "gpt-4o", "tool_choice": "auto"} # Run-level specifies "required" - run_options = ChatOptions(tool_choice="required") + run_options: ChatOptions = {"tool_choice": "required"} - merged = agent_options & run_options + merged = merge_chat_options(agent_options, run_options) # Run-level should override agent-level - assert merged.tool_choice == "required" - assert merged.model_id == "gpt-4o" # Other fields preserved + assert merged.get("tool_choice") == "required" + assert merged.get("model_id") == "gpt-4o" # Other fields preserved def test_chat_options_and_tool_choice_none_in_other_uses_self() -> None: """Test that when other.tool_choice is None, self.tool_choice is used.""" - agent_options = ChatOptions(tool_choice="auto") - run_options = ChatOptions(model_id="gpt-4.1") # tool_choice is None + agent_options: ChatOptions = {"tool_choice": "auto"} + run_options: ChatOptions = {"model_id": "gpt-4.1"} # tool_choice is None - merged = agent_options & run_options + merged = merge_chat_options(agent_options, run_options) # Should keep agent-level tool_choice since run-level is None - assert merged.tool_choice == "auto" - assert merged.model_id == "gpt-4.1" + assert merged.get("tool_choice") == "auto" + assert merged.get("model_id") == "gpt-4.1" def test_chat_options_and_tool_choice_with_tool_mode() -> None: """Test ChatOptions merge with ToolMode objects.""" - agent_options = ChatOptions(tool_choice=ToolMode.AUTO) - run_options = ChatOptions(tool_choice=ToolMode.REQUIRED_ANY) + agent_options: ChatOptions = {"tool_choice": "auto"} + run_options: ChatOptions = {"tool_choice": "required"} - merged = agent_options & run_options + merged = merge_chat_options(agent_options, run_options) - assert merged.tool_choice == ToolMode.REQUIRED_ANY - assert merged.tool_choice == "required" # ToolMode equality with string + assert merged.get("tool_choice") == "required" + assert merged.get("tool_choice") == "required" def test_chat_options_and_tool_choice_required_specific_function() -> None: """Test ChatOptions merge with required specific function.""" - agent_options = ChatOptions(tool_choice="auto") - run_options = ChatOptions(tool_choice=ToolMode.REQUIRED(function_name="get_weather")) + agent_options: ChatOptions = {"tool_choice": "auto"} + run_options: ChatOptions = {"tool_choice": {"mode": "required", "required_function_name": "get_weather"}} - merged = agent_options & run_options + merged = merge_chat_options(agent_options, run_options) - assert merged.tool_choice == "required" - assert merged.tool_choice.required_function_name == "get_weather" + tool_choice = merged.get("tool_choice") + assert tool_choice == {"mode": "required", "required_function_name": "get_weather"} + assert 
tool_choice["required_function_name"] == "get_weather" # region Agent Response Fixtures @@ -1249,7 +1282,7 @@ def test_function_call_content_parse_numeric_or_list(): def test_chat_tool_mode_eq_with_string(): - assert ToolMode.AUTO == "auto" + assert {"mode": "auto"} == {"mode": "auto"} # region AgentRunResponse @@ -1437,30 +1470,6 @@ def test_chat_message_from_dict_with_mixed_content(): assert len(message_dict["contents"]) == 3 -def test_chat_options_edge_cases(): - """Test ChatOptions with edge cases for better coverage.""" - - # Test with tools conversion - def sample_tool(): - return "test" - - options = ChatOptions(tools=[sample_tool], tool_choice="auto") - assert options.tool_choice == ToolMode.AUTO - - # Test to_dict with ToolMode - options_dict = options.to_dict() - assert "tool_choice" in options_dict - - # Test from_dict with tool_choice dict - data_with_dict_tool_choice = { - "model_id": "gpt-4", - "tool_choice": {"mode": "required", "required_function_name": "test_func"}, - } - options_from_dict = ChatOptions.from_dict(data_with_dict_tool_choice) - assert options_from_dict.tool_choice.mode == "required" - assert options_from_dict.tool_choice.required_function_name == "test_func" - - def test_text_content_add_type_error(): """Test TextContent __add__ raises TypeError for incompatible types.""" t1 = TextContent("Hello") @@ -1501,30 +1510,6 @@ def test_comprehensive_serialization_methods(): assert result_content.result == "success" -def test_chat_options_tool_choice_variations(): - """Test ChatOptions from_dict and to_dict with various tool_choice values.""" - - # Test with string tool_choice - data = {"model_id": "gpt-4", "tool_choice": "auto", "temperature": 0.7} - options = ChatOptions.from_dict(data) - assert options.tool_choice == ToolMode.AUTO - - # Test with dict tool_choice - data_dict = { - "model_id": "gpt-4", - "tool_choice": {"mode": "required", "required_function_name": "test_func"}, - "temperature": 0.7, - } - options_dict = ChatOptions.from_dict(data_dict) - assert options_dict.tool_choice.mode == "required" - assert options_dict.tool_choice.required_function_name == "test_func" - - # Test to_dict with ToolMode - options_dict_serialized = options_dict.to_dict() - assert "tool_choice" in options_dict_serialized - assert isinstance(options_dict_serialized["tool_choice"], dict) - - def test_chat_message_complex_content_serialization(): """Test ChatMessage serialization with various content types.""" diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index 861ccc73d1..77605432ff 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -17,7 +17,6 @@ ChatAgent, ChatClientProtocol, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, FunctionCallContent, @@ -27,7 +26,6 @@ HostedVectorStoreContent, Role, TextContent, - ToolMode, UriContent, UsageContent, ai_function, @@ -43,6 +41,8 @@ else "Integration tests are disabled.", ) +INTEGRATION_TEST_MODEL = "gpt-4.1-nano" + def create_test_openai_assistants_client( mock_async_openai: MagicMock, @@ -117,7 +117,7 @@ def mock_async_openai() -> MagicMock: return mock_client -def test_openai_assistants_client_init_with_client(mock_async_openai: MagicMock) -> None: +def test_init_with_client(mock_async_openai: MagicMock) -> None: """Test OpenAIAssistantsClient initialization with existing client.""" chat_client = 
create_test_openai_assistants_client( mock_async_openai, model_id="gpt-4", assistant_id="existing-assistant-id", thread_id="test-thread-id" @@ -131,7 +131,7 @@ def test_openai_assistants_client_init_with_client(mock_async_openai: MagicMock) assert isinstance(chat_client, ChatClientProtocol) -def test_openai_assistants_client_init_auto_create_client( +def test_init_auto_create_client( openai_unit_test_env: dict[str, str], mock_async_openai: MagicMock, ) -> None: @@ -151,7 +151,7 @@ def test_openai_assistants_client_init_auto_create_client( assert not chat_client._should_delete_assistant # type: ignore -def test_openai_assistants_client_init_validation_fail() -> None: +def test_init_validation_fail() -> None: """Test OpenAIAssistantsClient initialization with validation failure.""" with pytest.raises(ServiceInitializationError): # Force failure by providing invalid model ID type - this should cause validation to fail @@ -159,7 +159,7 @@ def test_openai_assistants_client_init_validation_fail() -> None: @pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True) -def test_openai_assistants_client_init_missing_model_id(openai_unit_test_env: dict[str, str]) -> None: +def test_init_missing_model_id(openai_unit_test_env: dict[str, str]) -> None: """Test OpenAIAssistantsClient initialization with missing model ID.""" with pytest.raises(ServiceInitializationError): OpenAIAssistantsClient( @@ -168,13 +168,13 @@ def test_openai_assistants_client_init_missing_model_id(openai_unit_test_env: di @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) -def test_openai_assistants_client_init_missing_api_key(openai_unit_test_env: dict[str, str]) -> None: +def test_init_missing_api_key(openai_unit_test_env: dict[str, str]) -> None: """Test OpenAIAssistantsClient initialization with missing API key.""" with pytest.raises(ServiceInitializationError): OpenAIAssistantsClient(model_id="gpt-4", env_file_path="nonexistent.env") -def test_openai_assistants_client_init_with_default_headers(openai_unit_test_env: dict[str, str]) -> None: +def test_init_with_default_headers(openai_unit_test_env: dict[str, str]) -> None: """Test OpenAIAssistantsClient initialization with default headers.""" default_headers = {"X-Unit-Test": "test-guid"} @@ -193,7 +193,7 @@ def test_openai_assistants_client_init_with_default_headers(openai_unit_test_env assert chat_client.client.default_headers[key] == value -async def test_openai_assistants_client_get_assistant_id_or_create_existing_assistant( +async def test_get_assistant_id_or_create_existing_assistant( mock_async_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when assistant_id is already provided.""" @@ -206,7 +206,7 @@ async def test_openai_assistants_client_get_assistant_id_or_create_existing_assi mock_async_openai.beta.assistants.create.assert_not_called() -async def test_openai_assistants_client_get_assistant_id_or_create_create_new( +async def test_get_assistant_id_or_create_create_new( mock_async_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when creating a new assistant.""" @@ -221,7 +221,7 @@ async def test_openai_assistants_client_get_assistant_id_or_create_create_new( mock_async_openai.beta.assistants.create.assert_called_once() -async def test_openai_assistants_client_aclose_should_not_delete( +async def test_aclose_should_not_delete( mock_async_openai: MagicMock, ) -> None: """Test close when assistant should not be deleted.""" @@ -236,7 +236,7 @@ async def 
test_openai_assistants_client_aclose_should_not_delete( assert not chat_client._should_delete_assistant # type: ignore -async def test_openai_assistants_client_aclose_should_delete(mock_async_openai: MagicMock) -> None: +async def test_aclose_should_delete(mock_async_openai: MagicMock) -> None: """Test close method calls cleanup.""" chat_client = create_test_openai_assistants_client( mock_async_openai, assistant_id="assistant-to-delete", should_delete_assistant=True @@ -249,7 +249,7 @@ async def test_openai_assistants_client_aclose_should_delete(mock_async_openai: assert not chat_client._should_delete_assistant # type: ignore -async def test_openai_assistants_client_async_context_manager(mock_async_openai: MagicMock) -> None: +async def test_async_context_manager(mock_async_openai: MagicMock) -> None: """Test async context manager functionality.""" chat_client = create_test_openai_assistants_client( mock_async_openai, assistant_id="assistant-to-delete", should_delete_assistant=True @@ -263,7 +263,7 @@ async def test_openai_assistants_client_async_context_manager(mock_async_openai: mock_async_openai.beta.assistants.delete.assert_called_once_with("assistant-to-delete") -def test_openai_assistants_client_serialize(openai_unit_test_env: dict[str, str]) -> None: +def test_serialize(openai_unit_test_env: dict[str, str]) -> None: """Test serialization of OpenAIAssistantsClient.""" default_headers = {"X-Unit-Test": "test-guid"} @@ -294,7 +294,7 @@ def test_openai_assistants_client_serialize(openai_unit_test_env: dict[str, str] assert "User-Agent" not in dumped_settings["default_headers"] -async def test_openai_assistants_client_get_active_thread_run_none_thread_id(mock_async_openai: MagicMock) -> None: +async def test_get_active_thread_run_none_thread_id(mock_async_openai: MagicMock) -> None: """Test _get_active_thread_run with None thread_id returns None.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -305,7 +305,7 @@ async def test_openai_assistants_client_get_active_thread_run_none_thread_id(moc mock_async_openai.beta.threads.runs.list.assert_not_called() -async def test_openai_assistants_client_get_active_thread_run_with_active_run(mock_async_openai: MagicMock) -> None: +async def test_get_active_thread_run_with_active_run(mock_async_openai: MagicMock) -> None: """Test _get_active_thread_run finds an active run.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -326,7 +326,7 @@ async def mock_runs_list(*args: Any, **kwargs: Any) -> Any: mock_async_openai.beta.threads.runs.list.assert_called_once_with(thread_id="thread-123", limit=1, order="desc") -async def test_openai_assistants_client_prepare_thread_create_new(mock_async_openai: MagicMock) -> None: +async def test_prepare_thread_create_new(mock_async_openai: MagicMock) -> None: """Test _prepare_thread creates new thread when thread_id is None.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -353,7 +353,7 @@ async def test_openai_assistants_client_prepare_thread_create_new(mock_async_ope ) -async def test_openai_assistants_client_prepare_thread_cancel_existing_run(mock_async_openai: MagicMock) -> None: +async def test_prepare_thread_cancel_existing_run(mock_async_openai: MagicMock) -> None: """Test _prepare_thread cancels existing run when provided.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -369,7 +369,7 @@ async def test_openai_assistants_client_prepare_thread_cancel_existing_run(mock_ 
mock_async_openai.beta.threads.runs.cancel.assert_called_once_with(run_id="run-456", thread_id="thread-123") -async def test_openai_assistants_client_prepare_thread_existing_no_run(mock_async_openai: MagicMock) -> None: +async def test_prepare_thread_existing_no_run(mock_async_openai: MagicMock) -> None: """Test _prepare_thread with existing thread_id but no active run.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -382,7 +382,7 @@ async def test_openai_assistants_client_prepare_thread_existing_no_run(mock_asyn mock_async_openai.beta.threads.runs.cancel.assert_not_called() -async def test_openai_assistants_client_process_stream_events_thread_run_created(mock_async_openai: MagicMock) -> None: +async def test_process_stream_events_thread_run_created(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.run.created event.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -415,7 +415,7 @@ async def async_iterator() -> Any: assert update.raw_representation == mock_response.data -async def test_openai_assistants_client_process_stream_events_message_delta_text(mock_async_openai: MagicMock) -> None: +async def test_process_stream_events_message_delta_text(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.message.delta event containing text.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -459,7 +459,7 @@ async def async_iterator() -> Any: assert update.raw_representation == mock_message_delta -async def test_openai_assistants_client_process_stream_events_requires_action(mock_async_openai: MagicMock) -> None: +async def test_process_stream_events_requires_action(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.run.requires_action event.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -502,7 +502,7 @@ async def async_iterator() -> Any: chat_client._parse_function_calls_from_assistants.assert_called_once_with(mock_run, None) # type: ignore -async def test_openai_assistants_client_process_stream_events_run_step_created(mock_async_openai: MagicMock) -> None: +async def test_process_stream_events_run_step_created(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.run.step.created event.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -534,7 +534,7 @@ async def async_iterator() -> Any: assert len(updates) == 0 -async def test_openai_assistants_client_process_stream_events_run_completed_with_usage( +async def test_process_stream_events_run_completed_with_usage( mock_async_openai: MagicMock, ) -> None: """Test _process_stream_events with thread.run.completed event containing usage.""" @@ -585,7 +585,7 @@ async def async_iterator() -> Any: assert update.raw_representation == mock_run -def test_openai_assistants_client_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock) -> None: +def test_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock) -> None: """Test _parse_function_calls_from_assistants with a simple function call.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -614,22 +614,22 @@ def test_openai_assistants_client_parse_function_calls_from_assistants_basic(moc assert contents[0].arguments == {"location": "Seattle"} -def test_openai_assistants_client_prepare_options_basic(mock_async_openai: MagicMock) -> None: +def test_prepare_options_basic(mock_async_openai: MagicMock) -> 
None: """Test _prepare_options with basic chat options.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - # Create basic chat options - chat_options = ChatOptions( - max_tokens=100, - model_id="gpt-4", - temperature=0.7, - top_p=0.9, - ) + # Create basic chat options as a dict + options = { + "max_tokens": 100, + "model_id": "gpt-4", + "temperature": 0.7, + "top_p": 0.9, + } messages = [ChatMessage(role=Role.USER, text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore # Check basic options were set assert run_options["max_completion_tokens"] == 100 @@ -639,7 +639,7 @@ def test_openai_assistants_client_prepare_options_basic(mock_async_openai: Magic assert tool_results is None -def test_openai_assistants_client_prepare_options_with_ai_function_tool(mock_async_openai: MagicMock) -> None: +def test_prepare_options_with_ai_function_tool(mock_async_openai: MagicMock) -> None: """Test _prepare_options with AIFunction tool.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -650,15 +650,15 @@ def test_function(query: str) -> str: """A test function.""" return f"Result for {query}" - chat_options = ChatOptions( - tools=[test_function], - tool_choice="auto", - ) + options = { + "tools": [test_function], + "tool_choice": "auto", + } messages = [ChatMessage(role=Role.USER, text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore # Check tools were set correctly assert "tools" in run_options @@ -668,22 +668,22 @@ def test_function(query: str) -> str: assert run_options["tool_choice"] == "auto" -def test_openai_assistants_client_prepare_options_with_code_interpreter(mock_async_openai: MagicMock) -> None: +def test_prepare_options_with_code_interpreter(mock_async_openai: MagicMock) -> None: """Test _prepare_options with HostedCodeInterpreterTool.""" chat_client = create_test_openai_assistants_client(mock_async_openai) # Create a real HostedCodeInterpreterTool code_tool = HostedCodeInterpreterTool() - chat_options = ChatOptions( - tools=[code_tool], - tool_choice="auto", - ) + options = { + "tools": [code_tool], + "tool_choice": "auto", + } messages = [ChatMessage(role=Role.USER, text="Calculate something")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore # Check code interpreter tool was set correctly assert "tools" in run_options @@ -692,39 +692,39 @@ def test_openai_assistants_client_prepare_options_with_code_interpreter(mock_asy assert run_options["tool_choice"] == "auto" -def test_openai_assistants_client_prepare_options_tool_choice_none(mock_async_openai: MagicMock) -> None: +def test_prepare_options_tool_choice_none(mock_async_openai: MagicMock) -> None: """Test _prepare_options with tool_choice set to 'none'.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - chat_options = ChatOptions( - tool_choice="none", - ) + options = { + "tool_choice": "none", + } messages = [ChatMessage(role=Role.USER, text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, 
tool_results = chat_client._prepare_options(messages, options) # type: ignore # Should set tool_choice to none and not include tools assert run_options["tool_choice"] == "none" assert "tools" not in run_options -def test_openai_assistants_client_prepare_options_required_function(mock_async_openai: MagicMock) -> None: +def test_prepare_options_required_function(mock_async_openai: MagicMock) -> None: """Test _prepare_options with required function tool choice.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - # Create a required function tool choice - tool_choice = ToolMode(mode="required", required_function_name="specific_function") + # Create a required function tool choice as dict + tool_choice = {"mode": "required", "required_function_name": "specific_function"} - chat_options = ChatOptions( - tool_choice=tool_choice, - ) + options = { + "tool_choice": tool_choice, + } messages = [ChatMessage(role=Role.USER, text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore # Check required function tool choice was set correctly expected_tool_choice = { @@ -734,7 +734,7 @@ def test_openai_assistants_client_prepare_options_required_function(mock_async_o assert run_options["tool_choice"] == expected_tool_choice -def test_openai_assistants_client_prepare_options_with_file_search_tool(mock_async_openai: MagicMock) -> None: +def test_prepare_options_with_file_search_tool(mock_async_openai: MagicMock) -> None: """Test _prepare_options with HostedFileSearchTool.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -742,15 +742,15 @@ def test_openai_assistants_client_prepare_options_with_file_search_tool(mock_asy # Create a HostedFileSearchTool with max_results file_search_tool = HostedFileSearchTool(max_results=10) - chat_options = ChatOptions( - tools=[file_search_tool], - tool_choice="auto", - ) + options = { + "tools": [file_search_tool], + "tool_choice": "auto", + } messages = [ChatMessage(role=Role.USER, text="Search for information")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore # Check file search tool was set correctly assert "tools" in run_options @@ -760,22 +760,22 @@ def test_openai_assistants_client_prepare_options_with_file_search_tool(mock_asy assert run_options["tool_choice"] == "auto" -def test_openai_assistants_client_prepare_options_with_mapping_tool(mock_async_openai: MagicMock) -> None: +def test_prepare_options_with_mapping_tool(mock_async_openai: MagicMock) -> None: """Test _prepare_options with MutableMapping tool.""" chat_client = create_test_openai_assistants_client(mock_async_openai) # Create a tool as a MutableMapping (dict) mapping_tool = {"type": "custom_tool", "parameters": {"setting": "value"}} - chat_options = ChatOptions( - tools=[mapping_tool], # type: ignore - tool_choice="auto", - ) + options = { + "tools": [mapping_tool], # type: ignore + "tool_choice": "auto", + } messages = [ChatMessage(role=Role.USER, text="Use custom tool")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore # Check mapping tool was set correctly assert "tools" in 
run_options @@ -784,7 +784,7 @@ def test_openai_assistants_client_prepare_options_with_mapping_tool(mock_async_o assert run_options["tool_choice"] == "auto" -def test_openai_assistants_client_prepare_options_with_system_message(mock_async_openai: MagicMock) -> None: +def test_prepare_options_with_system_message(mock_async_openai: MagicMock) -> None: """Test _prepare_options with system message converted to instructions.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -794,7 +794,7 @@ def test_openai_assistants_client_prepare_options_with_system_message(mock_async ] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, None) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, {}) # type: ignore # Check that additional_messages only contains the user message # System message should be converted to instructions (though this is handled internally) @@ -803,7 +803,7 @@ def test_openai_assistants_client_prepare_options_with_system_message(mock_async assert run_options["additional_messages"][0]["role"] == "user" -def test_openai_assistants_client_prepare_options_with_image_content(mock_async_openai: MagicMock) -> None: +def test_prepare_options_with_image_content(mock_async_openai: MagicMock) -> None: """Test _prepare_options with image content.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -813,7 +813,7 @@ def test_openai_assistants_client_prepare_options_with_image_content(mock_async_ messages = [ChatMessage(role=Role.USER, contents=[image_content])] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, None) # type: ignore + run_options, tool_results = chat_client._prepare_options(messages, {}) # type: ignore # Check that image content was processed assert "additional_messages" in run_options @@ -825,7 +825,7 @@ def test_openai_assistants_client_prepare_options_with_image_content(mock_async_ assert message["content"][0]["image_url"]["url"] == "https://example.com/image.jpg" -def test_openai_assistants_client_prepare_tool_outputs_for_assistants_empty(mock_async_openai: MagicMock) -> None: +def test_prepare_tool_outputs_for_assistants_empty(mock_async_openai: MagicMock) -> None: """Test _prepare_tool_outputs_for_assistants with empty list.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -835,7 +835,7 @@ def test_openai_assistants_client_prepare_tool_outputs_for_assistants_empty(mock assert tool_outputs is None -def test_openai_assistants_client_prepare_tool_outputs_for_assistants_valid(mock_async_openai: MagicMock) -> None: +def test_prepare_tool_outputs_for_assistants_valid(mock_async_openai: MagicMock) -> None: """Test _prepare_tool_outputs_for_assistants with valid function results.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -851,7 +851,7 @@ def test_openai_assistants_client_prepare_tool_outputs_for_assistants_valid(mock assert tool_outputs[0].get("output") == "Function executed successfully" -def test_openai_assistants_client_prepare_tool_outputs_for_assistants_mismatched_run_ids( +def test_prepare_tool_outputs_for_assistants_mismatched_run_ids( mock_async_openai: MagicMock, ) -> None: """Test _prepare_tool_outputs_for_assistants with mismatched run IDs.""" @@ -872,7 +872,7 @@ def test_openai_assistants_client_prepare_tool_outputs_for_assistants_mismatched assert tool_outputs[0].get("tool_call_id") == "call-456" -def 
test_openai_assistants_client_update_agent_name_and_description(mock_async_openai: MagicMock) -> None: +def test_update_agent_name_and_description(mock_async_openai: MagicMock) -> None: """Test _update_agent_name_and_description method updates assistant_name when not already set.""" # Test updating agent name when assistant_name is None chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_name=None) @@ -883,7 +883,7 @@ def test_openai_assistants_client_update_agent_name_and_description(mock_async_o assert chat_client.assistant_name == "New Assistant Name" -def test_openai_assistants_client_update_agent_name_and_description_existing(mock_async_openai: MagicMock) -> None: +def test_update_agent_name_and_description_existing(mock_async_openai: MagicMock) -> None: """Test _update_agent_name_and_description method doesn't override existing assistant_name.""" # Test that existing assistant_name is not overridden chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_name="Existing Assistant") @@ -895,7 +895,7 @@ def test_openai_assistants_client_update_agent_name_and_description_existing(moc assert chat_client.assistant_name == "Existing Assistant" -def test_openai_assistants_client_update_agent_name_and_description_none(mock_async_openai: MagicMock) -> None: +def test_update_agent_name_and_description_none(mock_async_openai: MagicMock) -> None: """Test _update_agent_name_and_description method with None agent_name parameter.""" # Test that None agent_name doesn't change anything chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_name=None) @@ -916,9 +916,9 @@ def get_weather( @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_assistants_client_get_response() -> None: +async def test_get_response() -> None: """Test OpenAI Assistants Client response.""" - async with OpenAIAssistantsClient() as openai_assistants_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) messages: list[ChatMessage] = [] @@ -941,9 +941,9 @@ async def test_openai_assistants_client_get_response() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_assistants_client_get_response_tools() -> None: +async def test_get_response_tools() -> None: """Test OpenAI Assistants Client response with tools.""" - async with OpenAIAssistantsClient() as openai_assistants_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) messages: list[ChatMessage] = [] @@ -952,8 +952,7 @@ async def test_openai_assistants_client_get_response_tools() -> None: # Test that the client can be used to get a response response = await openai_assistants_client.get_response( messages=messages, - tools=[get_weather], - tool_choice="auto", + options={"tools": [get_weather], "tool_choice": "auto"}, ) assert response is not None @@ -963,9 +962,9 @@ async def test_openai_assistants_client_get_response_tools() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_assistants_client_streaming() -> None: +async def test_streaming() -> None: """Test OpenAI Assistants Client streaming response.""" - async with OpenAIAssistantsClient() as openai_assistants_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as 
openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) messages: list[ChatMessage] = [] @@ -994,9 +993,9 @@ async def test_openai_assistants_client_streaming() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_assistants_client_streaming_tools() -> None: +async def test_streaming_tools() -> None: """Test OpenAI Assistants Client streaming response with tools.""" - async with OpenAIAssistantsClient() as openai_assistants_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) messages: list[ChatMessage] = [] @@ -1005,8 +1004,10 @@ async def test_openai_assistants_client_streaming_tools() -> None: # Test that the client can be used to get a response response = openai_assistants_client.get_streaming_response( messages=messages, - tools=[get_weather], - tool_choice="auto", + options={ + "tools": [get_weather], + "tool_choice": "auto", + }, ) full_message: str = "" async for chunk in response: @@ -1021,10 +1022,10 @@ async def test_openai_assistants_client_streaming_tools() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_assistants_client_with_existing_assistant() -> None: +async def test_with_existing_assistant() -> None: """Test OpenAI Assistants Client with existing assistant ID.""" # First create an assistant to use in the test - async with OpenAIAssistantsClient() as temp_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as temp_client: # Get the assistant ID by triggering assistant creation messages = [ChatMessage(role="user", text="Hello")] await temp_client.get_response(messages=messages) @@ -1032,7 +1033,7 @@ async def test_openai_assistants_client_with_existing_assistant() -> None: # Now test using the existing assistant async with OpenAIAssistantsClient( - model_id="gpt-4o-mini", assistant_id=assistant_id + model_id=INTEGRATION_TEST_MODEL, assistant_id=assistant_id ) as openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) assert openai_assistants_client.assistant_id == assistant_id @@ -1050,9 +1051,9 @@ async def test_openai_assistants_client_with_existing_assistant() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled @pytest.mark.skip(reason="OpenAI file search functionality is currently broken - tracked in GitHub issue") -async def test_openai_assistants_client_file_search() -> None: +async def test_file_search() -> None: """Test OpenAI Assistants Client response.""" - async with OpenAIAssistantsClient() as openai_assistants_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) messages: list[ChatMessage] = [] @@ -1061,8 +1062,10 @@ async def test_openai_assistants_client_file_search() -> None: file_id, vector_store = await create_vector_store(openai_assistants_client) response = await openai_assistants_client.get_response( messages=messages, - tools=[HostedFileSearchTool()], - tool_resources={"file_search": {"vector_store_ids": [vector_store.vector_store_id]}}, + options={ + "tools": [HostedFileSearchTool()], + "tool_resources": {"file_search": {"vector_store_ids": [vector_store.vector_store_id]}}, + }, ) await delete_vector_store(openai_assistants_client, file_id, vector_store.vector_store_id) @@ -1074,9 +1077,9 @@ async def 
test_openai_assistants_client_file_search() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled @pytest.mark.skip(reason="OpenAI file search functionality is currently broken - tracked in GitHub issue") -async def test_openai_assistants_client_file_search_streaming() -> None: +async def test_file_search_streaming() -> None: """Test OpenAI Assistants Client response.""" - async with OpenAIAssistantsClient() as openai_assistants_client: + async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: assert isinstance(openai_assistants_client, ChatClientProtocol) messages: list[ChatMessage] = [] @@ -1085,8 +1088,10 @@ async def test_openai_assistants_client_file_search_streaming() -> None: file_id, vector_store = await create_vector_store(openai_assistants_client) response = openai_assistants_client.get_streaming_response( messages=messages, - tools=[HostedFileSearchTool()], - tool_resources={"file_search": {"vector_store_ids": [vector_store.vector_store_id]}}, + options={ + "tools": [HostedFileSearchTool()], + "tool_resources": {"file_search": {"vector_store_ids": [vector_store.vector_store_id]}}, + }, ) assert response is not None @@ -1107,7 +1112,7 @@ async def test_openai_assistants_client_file_search_streaming() -> None: async def test_openai_assistants_agent_basic_run(): """Test ChatAgent basic run functionality with OpenAIAssistantsClient.""" async with ChatAgent( - chat_client=OpenAIAssistantsClient(), + chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), ) as agent: # Run a simple query response = await agent.run("Hello! Please respond with 'Hello World' exactly.") @@ -1124,7 +1129,7 @@ async def test_openai_assistants_agent_basic_run(): async def test_openai_assistants_agent_basic_run_streaming(): """Test ChatAgent basic streaming functionality with OpenAIAssistantsClient.""" async with ChatAgent( - chat_client=OpenAIAssistantsClient(), + chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), ) as agent: # Run streaming query full_message: str = "" @@ -1144,7 +1149,7 @@ async def test_openai_assistants_agent_basic_run_streaming(): async def test_openai_assistants_agent_thread_persistence(): """Test ChatAgent thread persistence across runs with OpenAIAssistantsClient.""" async with ChatAgent( - chat_client=OpenAIAssistantsClient(), + chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant with good memory.", ) as agent: # Create a new thread that will be reused @@ -1176,7 +1181,7 @@ async def test_openai_assistants_agent_existing_thread_id(): existing_thread_id = None async with ChatAgent( - chat_client=OpenAIAssistantsClient(), + chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful weather agent.", tools=[get_weather], ) as agent: @@ -1219,7 +1224,7 @@ async def test_openai_assistants_agent_code_interpreter(): """Test ChatAgent with code interpreter through OpenAIAssistantsClient.""" async with ChatAgent( - chat_client=OpenAIAssistantsClient(), + chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], ) as agent: @@ -1235,11 +1240,11 @@ async def test_openai_assistants_agent_code_interpreter(): @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_assistants_client_agent_level_tool_persistence(): +async def 
test_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with OpenAI Assistants Client.""" async with ChatAgent( - chat_client=OpenAIAssistantsClient(), + chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # Agent-level tool ) as agent: @@ -1261,7 +1266,7 @@ async def test_openai_assistants_client_agent_level_tool_persistence(): # Callable API Key Tests -def test_openai_assistants_client_with_callable_api_key() -> None: +def test_with_callable_api_key() -> None: """Test OpenAIAssistantsClient initialization with callable API key.""" async def get_api_key() -> str: diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index 18854799fd..1f1d624345 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -1,25 +1,22 @@ # Copyright (c) Microsoft. All rights reserved. +import json import os -from typing import Annotated +from typing import Any from unittest.mock import MagicMock, patch import pytest from openai import BadRequestError +from pydantic import BaseModel +from pytest import param from agent_framework import ( - AgentRunResponse, - AgentRunResponseUpdate, - ChatAgent, ChatClientProtocol, ChatMessage, - ChatOptions, ChatResponse, - ChatResponseUpdate, DataContent, FunctionResultContent, HostedWebSearchTool, - TextContent, ToolProtocol, ai_function, prepare_function_call_results, @@ -170,7 +167,7 @@ async def test_content_filter_exception_handling(openai_unit_test_env: dict[str, patch.object(client.client.chat.completions, "create", side_effect=mock_error), pytest.raises(OpenAIContentFilterException), ): - await client._inner_get_response(messages=messages, chat_options=ChatOptions()) # type: ignore + await client._inner_get_response(messages=messages, options={}) # type: ignore def test_unsupported_tool_handling(openai_unit_test_env: dict[str, str]) -> None: @@ -183,12 +180,12 @@ def test_unsupported_tool_handling(openai_unit_test_env: dict[str, str]) -> None # This should ignore the unsupported ToolProtocol and return empty list result = client._prepare_tools_for_openai([unsupported_tool]) # type: ignore - assert result == [] + assert result == {} # Also test with a non-ToolProtocol that should be converted to dict dict_tool = {"type": "function", "name": "test"} result = client._prepare_tools_for_openai([dict_tool]) # type: ignore - assert result == [dict_tool] + assert result["tools"] == [dict_tool] @ai_function @@ -208,407 +205,6 @@ def get_weather(location: str) -> str: return f"The weather in {location} is sunny and 72°F." -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_completion_response() -> None: - """Test OpenAI chat completion responses.""" - openai_chat_client = OpenAIChatClient() - - assert isinstance(openai_chat_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. 
" - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = await openai_chat_client.get_response(messages=messages) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "scientists" in response.text - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_completion_response_params() -> None: - """Test OpenAI chat completion responses.""" - openai_chat_client = OpenAIChatClient() - - assert isinstance(openai_chat_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = await openai_chat_client.get_response( - messages=messages, chat_options=ChatOptions(max_tokens=150, temperature=0.7, top_p=0.9) - ) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "scientists" in response.text - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_completion_response_tools() -> None: - """Test OpenAI chat completion responses.""" - openai_chat_client = OpenAIChatClient() - - assert isinstance(openai_chat_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = await openai_chat_client.get_response( - messages=messages, - tools=[get_story_text], - tool_choice="auto", - ) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "scientists" in response.text - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_streaming() -> None: - """Test Azure OpenAI chat completion responses.""" - openai_chat_client = OpenAIChatClient() - - assert isinstance(openai_chat_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. 
" - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = openai_chat_client.get_streaming_response(messages=messages) - - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - assert chunk.message_id is not None - assert chunk.response_id is not None - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - assert "scientists" in full_message - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_streaming_tools() -> None: - """Test AzureOpenAI chat completion responses.""" - openai_chat_client = OpenAIChatClient() - - assert isinstance(openai_chat_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = openai_chat_client.get_streaming_response( - messages=messages, - tools=[get_story_text], - tool_choice="auto", - ) - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - assert "scientists" in full_message - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_web_search() -> None: - # Currently only a select few models support web search tool calls - openai_chat_client = OpenAIChatClient(model_id="gpt-4o-search-preview") - - assert isinstance(openai_chat_client, ChatClientProtocol) - - # Test that the client will use the web search tool - response = await openai_chat_client.get_response( - messages=[ - ChatMessage( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - tools=[HostedWebSearchTool()], - tool_choice="auto", - ) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text - - # Test that the client will use the web search tool with location - additional_properties = { - "user_location": { - "country": "US", - "city": "Seattle", - } - } - response = await openai_chat_client.get_response( - messages=[ChatMessage(role="user", text="What is the current weather? Do not ask for my current location.")], - tools=[HostedWebSearchTool(additional_properties=additional_properties)], - tool_choice="auto", - ) - assert response.text is not None - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_web_search_streaming() -> None: - openai_chat_client = OpenAIChatClient(model_id="gpt-4o-search-preview") - - assert isinstance(openai_chat_client, ChatClientProtocol) - - # Test that the client will use the web search tool - response = openai_chat_client.get_streaming_response( - messages=[ - ChatMessage( - role="user", - text="Who are the main characters of Kpop Demon Hunters? 
Do a web search to find the answer.", - ) - ], - tools=[HostedWebSearchTool()], - tool_choice="auto", - ) - - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - assert "Rumi" in full_message - assert "Mira" in full_message - assert "Zoey" in full_message - - # Test that the client will use the web search tool with location - additional_properties = { - "user_location": { - "country": "US", - "city": "Seattle", - } - } - response = openai_chat_client.get_streaming_response( - messages=[ChatMessage(role="user", text="What is the current weather? Do not ask for my current location.")], - tools=[HostedWebSearchTool(additional_properties=additional_properties)], - tool_choice="auto", - ) - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - assert full_message is not None - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_agent_basic_run(): - """Test OpenAI chat client agent basic run functionality with OpenAIChatClient.""" - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), - ) as agent: - # Test basic run - response = await agent.run("Hello! Please respond with 'Hello World' exactly.") - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - assert "hello world" in response.text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_agent_basic_run_streaming(): - """Test OpenAI chat client agent basic streaming functionality with OpenAIChatClient.""" - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), - ) as agent: - # Test streaming run - full_text = "" - async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): - assert isinstance(chunk, AgentRunResponseUpdate) - if chunk.text: - full_text += chunk.text - - assert len(full_text) > 0 - assert "streaming response test" in full_text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_agent_thread_persistence(): - """Test OpenAI chat client agent thread persistence across runs with OpenAIChatClient.""" - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), - instructions="You are a helpful assistant with good memory.", - ) as agent: - # Create a new thread that will be reused - thread = agent.get_new_thread() - - # First interaction - response1 = await agent.run("My name is Alice. 
Remember this.", thread=thread) - - assert isinstance(response1, AgentRunResponse) - assert response1.text is not None - - # Second interaction - test memory - response2 = await agent.run("What is my name?", thread=thread) - - assert isinstance(response2, AgentRunResponse) - assert response2.text is not None - assert "alice" in response2.text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_agent_existing_thread(): - """Test OpenAI chat client agent with existing thread to continue conversations across agent instances.""" - # First conversation - capture the thread - preserved_thread = None - - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), - instructions="You are a helpful assistant with good memory.", - ) as first_agent: - # Start a conversation and capture the thread - thread = first_agent.get_new_thread() - first_response = await first_agent.run("My name is Alice. Remember this.", thread=thread) - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - - # Preserve the thread for reuse - preserved_thread = thread - - # Second conversation - reuse the thread in a new agent instance - if preserved_thread: - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), - instructions="You are a helpful assistant with good memory.", - ) as second_agent: - # Reuse the preserved thread - second_response = await second_agent.run("What is my name?", thread=preserved_thread) - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - assert "alice" in second_response.text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_agent_level_tool_persistence(): - """Test that agent-level tools persist across multiple runs with OpenAI Chat Client.""" - - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4.1"), - instructions="You are a helpful assistant that uses available tools.", - tools=[get_weather], # Agent-level tool - ) as agent: - # First run - agent-level tool should be available - first_response = await agent.run("What's the weather like in Chicago?") - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the agent-level weather tool - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - agent-level tool should still be available (persistence test) - second_response = await agent.run("What's the weather in Miami?") - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should use the agent-level weather tool again - assert any(term in second_response.text.lower() for term in ["miami", "sunny", "72"]) - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_chat_client_run_level_tool_isolation(): - """Test that run-level tools are isolated to specific runs and don't persist with OpenAI Chat Client.""" - # Counter to track how many times the weather tool is called - call_count = 0 - - @ai_function - async def get_weather_with_counter(location: Annotated[str, "The location as a city name"]) -> str: - """Get the current weather in a given location.""" - nonlocal call_count - call_count += 1 - return f"The weather in {location} is sunny and 72°F." 
- - async with ChatAgent( - chat_client=OpenAIChatClient(model_id="gpt-4.1"), - instructions="You are a helpful assistant.", - ) as agent: - # First run - use run-level tool - first_response = await agent.run( - "What's the weather like in Chicago?", - tools=[get_weather_with_counter], # Run-level tool - ) - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the run-level weather tool (call count should be 1) - assert call_count == 1 - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - run-level tool should NOT persist (key isolation test) - second_response = await agent.run("What's the weather like in Miami?") - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should NOT use the weather tool since it was only run-level in previous call - # Call count should still be 1 (no additional calls) - assert call_count == 1 - - async def test_exception_message_includes_original_error_details() -> None: """Test that exception messages include original error details in the new format.""" client = OpenAIChatClient(model_id="test-model", api_key="test-key") @@ -627,7 +223,7 @@ async def test_exception_message_includes_original_error_details() -> None: patch.object(client.client.chat.completions, "create", side_effect=mock_error), pytest.raises(ServiceResponseException) as exc_info, ): - await client._inner_get_response(messages=messages, chat_options=ChatOptions()) # type: ignore + await client._inner_get_response(messages=messages, options={}) # type: ignore exception_message = str(exc_info.value) assert "service failed to complete the prompt:" in exception_message @@ -667,7 +263,7 @@ def test_chat_response_content_order_text_before_tool_calls(openai_unit_test_env ) client = OpenAIChatClient() - response = client._parse_response_from_openai(mock_response, ChatOptions()) + response = client._parse_response_from_openai(mock_response, {}) # Verify we have both text and tool call content assert len(response.messages) == 1 @@ -894,3 +490,191 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: assert result["type"] == "file" assert "filename" not in result["file"] # None filename should be omitted + + +# region Integration Tests + + +class OutputStruct(BaseModel): + """A structured output for testing purposes.""" + + location: str + weather: str | None = None + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +@pytest.mark.parametrize( + "option_name,option_value,needs_validation", + [ + # Simple ChatOptions - just verify they don't fail + param("temperature", 0.7, False, id="temperature"), + param("top_p", 0.9, False, id="top_p"), + param("max_tokens", 500, False, id="max_tokens"), + param("seed", 123, False, id="seed"), + param("user", "test-user-id", False, id="user"), + param("frequency_penalty", 0.5, False, id="frequency_penalty"), + param("presence_penalty", 0.3, False, id="presence_penalty"), + param("stop", ["END"], False, id="stop"), + param("allow_multiple_tool_calls", True, False, id="allow_multiple_tool_calls"), + # OpenAIChatOptions - just verify they don't fail + param("logit_bias", {"50256": -1}, False, id="logit_bias"), + param("prediction", {"type": "content", "content": "hello world"}, False, id="prediction"), + # Complex options requiring output validation + param("tools", [get_weather], True, id="tools_function"), + param("tool_choice", "auto", True, id="tool_choice_auto"), + 
param("tool_choice", "none", True, id="tool_choice_none"), + param("tool_choice", "required", True, id="tool_choice_required_any"), + param( + "tool_choice", + {"mode": "required", "required_function_name": "get_weather"}, + True, + id="tool_choice_required", + ), + param("response_format", OutputStruct, True, id="response_format_pydantic"), + param( + "response_format", + { + "type": "json_schema", + "json_schema": { + "name": "WeatherDigest", + "strict": True, + "schema": { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + }, + }, + }, + True, + id="response_format_runtime_json_schema", + ), + ], +) +async def test_integration_options( + option_name: str, + option_value: Any, + needs_validation: bool, +) -> None: + """Parametrized test covering all ChatOptions and OpenAIChatOptions. + + Tests both streaming and non-streaming modes for each option to ensure + they don't cause failures. Options marked with needs_validation also + check that the feature actually works correctly. + """ + client = OpenAIChatClient() + # to ensure toolmode required does not endlessly loop + client.function_invocation_configuration.max_iterations = 1 + + for streaming in [False, True]: + # Prepare test message + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Use weather-related prompt for tool tests + messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + elif option_name.startswith("response_format"): + # Use prompt that works well with structured output + messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + else: + # Generic prompt for simple options + messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + + # Build options dict + options: dict[str, Any] = {option_name: option_value} + + # Add tools if testing tool_choice to avoid errors + if option_name.startswith("tool_choice"): + options["tools"] = [get_weather] + + if streaming: + # Test streaming mode + response_gen = client.get_streaming_response( + messages=messages, + options=options, + ) + + output_format = option_value if option_name.startswith("response_format") else None + response = await ChatResponse.from_chat_response_generator(response_gen, output_format_type=output_format) + else: + # Test non-streaming mode + response = await client.get_response( + messages=messages, + options=options, + ) + + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" + + # Validate based on option type + if needs_validation: + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" + elif option_name.startswith("response_format"): + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in 
response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." + response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_integration_web_search() -> None: + client = OpenAIChatClient(model_id="gpt-4o-search-preview") + + for streaming in [False, True]: + content = { + "messages": "Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool()], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text + + # Test that the client will use the web search tool with location + additional_properties = { + "user_location": { + "country": "US", + "city": "Seattle", + } + } + content = { + "messages": "What is the current weather? Do not ask for my current location.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool(additional_properties=additional_properties)], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + assert response.text is not None diff --git a/python/packages/core/tests/openai/test_openai_chat_client_base.py b/python/packages/core/tests/openai/test_openai_chat_client_base.py index 3e48899509..3c9a432db0 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client_base.py +++ b/python/packages/core/tests/openai/test_openai_chat_client_base.py @@ -115,7 +115,6 @@ async def test_cmc_no_fcc_in_response( openai_chat_completion = OpenAIChatClient() await openai_chat_completion.get_response( messages=chat_history, - arguments={}, ) mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], @@ -199,7 +198,7 @@ async def test_cmc_additional_properties( chat_history.append(ChatMessage(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() - await openai_chat_completion.get_response(messages=chat_history, additional_properties={"reasoning_effort": "low"}) + await openai_chat_completion.get_response(messages=chat_history, options={"reasoning_effort": "low"}) mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, @@ -382,8 +381,6 @@ def test_chat_response_created_at_uses_utc(openai_unit_test_env: dict[str, str]) This is a regression test for the issue where created_at was using local time but labeling it as UTC (with 'Z' suffix). 
""" - from agent_framework import ChatOptions - # Use a specific Unix timestamp: 1733011890 = 2024-12-01T00:31:30Z (UTC) # This ensures we test that the timestamp is actually converted to UTC utc_timestamp = 1733011890 @@ -399,7 +396,7 @@ def test_chat_response_created_at_uses_utc(openai_unit_test_env: dict[str, str]) ) client = OpenAIChatClient() - response = client._parse_response_from_openai(mock_response, ChatOptions()) + response = client._parse_response_from_openai(mock_response, {}) # Verify that created_at is correctly formatted as UTC assert response.created_at is not None diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 778ce843ee..c91297d7df 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -2,28 +2,35 @@ import asyncio import base64 +import json import os from datetime import datetime, timezone -from typing import Annotated +from typing import Annotated, Any from unittest.mock import MagicMock, patch import pytest from openai import BadRequestError from openai.types.responses.response_reasoning_item import Summary -from openai.types.responses.response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent -from openai.types.responses.response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent -from openai.types.responses.response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent -from openai.types.responses.response_reasoning_text_done_event import ResponseReasoningTextDoneEvent +from openai.types.responses.response_reasoning_summary_text_delta_event import ( + ResponseReasoningSummaryTextDeltaEvent, +) +from openai.types.responses.response_reasoning_summary_text_done_event import ( + ResponseReasoningSummaryTextDoneEvent, +) +from openai.types.responses.response_reasoning_text_delta_event import ( + ResponseReasoningTextDeltaEvent, +) +from openai.types.responses.response_reasoning_text_done_event import ( + ResponseReasoningTextDoneEvent, +) from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent from pydantic import BaseModel +from pytest import param from agent_framework import ( - AgentRunResponse, - AgentRunResponseUpdate, - AgentThread, - ChatAgent, ChatClientProtocol, ChatMessage, + ChatOptions, ChatResponse, ChatResponseUpdate, CodeInterpreterToolCallContent, @@ -42,15 +49,17 @@ HostedWebSearchTool, ImageGenerationToolCallContent, ImageGenerationToolResultContent, - MCPStreamableHTTPTool, Role, TextContent, TextReasoningContent, UriContent, ai_function, ) -from agent_framework._types import ChatOptions -from agent_framework.exceptions import ServiceInitializationError, ServiceInvalidRequestError, ServiceResponseException +from agent_framework.exceptions import ( + ServiceInitializationError, + ServiceInvalidRequestError, + ServiceResponseException, +) from agent_framework.openai import OpenAIResponsesClient from agent_framework.openai._exceptions import OpenAIContentFilterException @@ -70,10 +79,13 @@ class OutputStruct(BaseModel): weather: str | None = None -async def create_vector_store(client: OpenAIResponsesClient) -> tuple[str, HostedVectorStoreContent]: +async def create_vector_store( + client: OpenAIResponsesClient, +) -> tuple[str, HostedVectorStoreContent]: """Create a vector store with sample documents for testing.""" file = await client.client.files.create( 
- file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="user_data" + file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), + purpose="user_data", ) vector_store = await client.client.vector_stores.create( name="knowledge_base", @@ -217,25 +229,27 @@ def test_get_response_with_all_parameters() -> None: asyncio.run( client.get_response( messages=[ChatMessage(role="user", text="Test message")], - include=["message.output_text.logprobs"], - instructions="You are a helpful assistant", - max_tokens=100, - parallel_tool_calls=True, - model_id="gpt-4", - previous_response_id="prev-123", - reasoning={"chain_of_thought": "enabled"}, - service_tier="auto", - response_format=OutputStruct, - seed=42, - store=True, - temperature=0.7, - tool_choice="auto", - tools=[get_weather], - top_p=0.9, - user="test-user", - truncation="auto", - timeout=30.0, - additional_properties={"custom": "value"}, + options={ + "include": ["message.output_text.logprobs"], + "instructions": "You are a helpful assistant", + "max_tokens": 100, + "parallel_tool_calls": True, + "model_id": "gpt-4", + "previous_response_id": "prev-123", + "reasoning": {"chain_of_thought": "enabled"}, + "service_tier": "auto", + "response_format": OutputStruct, + "seed": 42, + "store": True, + "temperature": 0.7, + "tool_choice": "auto", + "tools": [get_weather], + "top_p": 0.9, + "user": "test-user", + "truncation": "auto", + "timeout": 30.0, + "additional_properties": {"custom": "value"}, + }, ) ) @@ -247,7 +261,12 @@ def test_web_search_tool_with_location() -> None: # Test web search tool with location web_search_tool = HostedWebSearchTool( additional_properties={ - "user_location": {"country": "US", "city": "Seattle", "region": "WA", "timezone": "America/Los_Angeles"} + "user_location": { + "country": "US", + "city": "Seattle", + "region": "WA", + "timezone": "America/Los_Angeles", + } } ) @@ -256,8 +275,7 @@ def test_web_search_tool_with_location() -> None: asyncio.run( client.get_response( messages=[ChatMessage(role="user", text="What's the weather?")], - tools=[web_search_tool], - tool_choice="auto", + options={"tools": [web_search_tool], "tool_choice": "auto"}, ) ) @@ -272,7 +290,10 @@ def test_file_search_tool_with_invalid_inputs() -> None: # Should raise an error due to invalid inputs with pytest.raises(ValueError, match="HostedFileSearchTool requires inputs to be of type"): asyncio.run( - client.get_response(messages=[ChatMessage(role="user", text="Search files")], tools=[file_search_tool]) + client.get_response( + messages=[ChatMessage(role="user", text="Search files")], + options={"tools": [file_search_tool]}, + ) ) @@ -285,7 +306,10 @@ def test_code_interpreter_tool_variations() -> None: with pytest.raises(ServiceResponseException): asyncio.run( - client.get_response(messages=[ChatMessage(role="user", text="Run some code")], tools=[code_tool_empty]) + client.get_response( + messages=[ChatMessage(role="user", text="Run some code")], + options={"tools": [code_tool_empty]}, + ) ) # Test code interpreter with files @@ -296,7 +320,8 @@ def test_code_interpreter_tool_variations() -> None: with pytest.raises(ServiceResponseException): asyncio.run( client.get_response( - messages=[ChatMessage(role="user", text="Process these files")], tools=[code_tool_with_files] + messages=[ChatMessage(role="user", text="Process these files")], + options={"tools": [code_tool_with_files]}, ) ) @@ -330,7 +355,10 @@ def test_hosted_file_search_tool_validation() -> None: with pytest.raises((ValueError, 
ServiceInvalidRequestError)): asyncio.run( - client.get_response(messages=[ChatMessage(role="user", text="Test")], tools=[empty_file_search_tool]) + client.get_response( + messages=[ChatMessage(role="user", text="Test")], + options={"tools": [empty_file_search_tool]}, + ) ) @@ -377,7 +405,8 @@ async def test_response_format_parse_path() -> None: with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): response = await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct, store=True + messages=[ChatMessage(role="user", text="Test message")], + options={"response_format": OutputStruct, "store": True}, ) assert response.response_id == "parsed_response_123" assert response.conversation_id == "parsed_response_123" @@ -403,7 +432,8 @@ async def test_response_format_parse_path_with_conversation_id() -> None: with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): response = await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct, store=True + messages=[ChatMessage(role="user", text="Test message")], + options={"response_format": OutputStruct, "store": True}, ) assert response.response_id == "parsed_response_123" assert response.conversation_id == "conversation_456" @@ -425,7 +455,8 @@ async def test_bad_request_error_non_content_filter() -> None: with patch.object(client.client.responses, "parse", side_effect=mock_error): with pytest.raises(ServiceResponseException) as exc_info: await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct + messages=[ChatMessage(role="user", text="Test message")], + options={"response_format": OutputStruct}, ) assert "failed to complete the prompt" in str(exc_info.value) @@ -450,41 +481,6 @@ async def test_streaming_content_filter_exception_handling() -> None: break -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_get_streaming_response_with_all_parameters() -> None: - """Test get_streaming_response with all possible parameters.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - - # Should fail due to invalid API key - with pytest.raises(ServiceResponseException): - response = client.get_streaming_response( - messages=[ChatMessage(role="user", text="Test streaming")], - include=["file_search_call.results"], - instructions="Stream response test", - max_tokens=50, - parallel_tool_calls=False, - model_id="gpt-4", - previous_response_id="stream-prev-123", - reasoning={"mode": "stream"}, - service_tier="default", - response_format=OutputStruct, - seed=123, - store=False, - temperature=0.5, - tool_choice="none", - tools=[], - top_p=0.8, - user="stream-user", - truncation="last_messages", - timeout=15.0, - additional_properties={"stream_custom": "stream_value"}, - ) - # Just iterate once to trigger the logic - async for _ in response: - break - - def test_response_content_creation_with_annotations() -> None: """Test _parse_response_from_openai with different annotation types.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -517,7 +513,7 @@ def test_response_content_creation_with_annotations() -> None: mock_response.output = [mock_message_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = 
client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) >= 1 assert isinstance(response.messages[0].contents[0], TextContent) @@ -548,7 +544,7 @@ def test_response_content_creation_with_refusal() -> None: mock_response.output = [mock_message_item] - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 1 assert isinstance(response.messages[0].contents[0], TextContent) @@ -578,7 +574,7 @@ def test_response_content_creation_with_reasoning() -> None: mock_response.output = [mock_reasoning_item] - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 2 assert isinstance(response.messages[0].contents[0], TextReasoningContent) @@ -614,7 +610,7 @@ def test_response_content_creation_with_code_interpreter() -> None: mock_response.output = [mock_code_interpreter_item] - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 2 call_content, result_content = response.messages[0].contents @@ -649,7 +645,7 @@ def test_response_content_creation_with_function_call() -> None: mock_response.output = [mock_function_call_item] - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 1 assert isinstance(response.messages[0].contents[0], FunctionCallContent) @@ -710,7 +706,7 @@ def test_parse_response_from_openai_with_mcp_approval_request() -> None: mock_response.output = [mock_item] - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert isinstance(response.messages[0].contents[0], FunctionApprovalRequestContent) req = response.messages[0].contents[0] @@ -720,7 +716,9 @@ def test_parse_response_from_openai_with_mcp_approval_request() -> None: assert req.function_call.additional_properties["server_label"] == "My_MCP" -def test_responses_client_created_at_uses_utc(openai_unit_test_env: dict[str, str]) -> None: +def test_responses_client_created_at_uses_utc( + openai_unit_test_env: dict[str, str], +) -> None: """Test that ChatResponse from responses client uses UTC timestamp. 
This is a regression test for the issue where created_at was using local time @@ -751,7 +749,7 @@ def test_responses_client_created_at_uses_utc(openai_unit_test_env: dict[str, st mock_response.output = [mock_message_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore # Verify that created_at is correctly formatted as UTC assert response.created_at is not None @@ -1203,7 +1201,7 @@ def test_service_response_exception_includes_original_error_details() -> None: patch.object(client.client.responses, "parse", side_effect=mock_error), pytest.raises(ServiceResponseException) as exc_info, ): - asyncio.run(client.get_response(messages=messages, response_format=OutputStruct)) + asyncio.run(client.get_response(messages=messages, options={"response_format": OutputStruct})) exception_message = str(exc_info.value) assert "service failed to complete the prompt:" in exception_message @@ -1219,7 +1217,7 @@ def test_get_streaming_response_with_response_format() -> None: with pytest.raises(ServiceResponseException): async def run_streaming(): - async for _ in client.get_streaming_response(messages=messages, response_format=OutputStruct): + async for _ in client.get_streaming_response(messages=messages, options={"response_format": OutputStruct}): pass asyncio.run(run_streaming()) @@ -1518,7 +1516,7 @@ def test_parse_response_from_openai_image_generation_raw_base64(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore # Verify the response contains call + result with DataContent output assert len(response.messages[0].contents) == 2 @@ -1555,7 +1553,7 @@ def test_parse_response_from_openai_image_generation_existing_data_uri(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore # Verify the response contains call + result with DataContent output assert len(response.messages[0].contents) == 2 @@ -1591,7 +1589,7 @@ def test_parse_response_from_openai_image_generation_format_detection(): mock_response_jpeg.output = [mock_item_jpeg] with patch.object(client, "_get_metadata_from_response", return_value={}): - response_jpeg = client._parse_response_from_openai(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore + response_jpeg = client._parse_response_from_openai(mock_response_jpeg, options={}) # type: ignore result_contents = response_jpeg.messages[0].contents assert isinstance(result_contents[1], ImageGenerationToolResultContent) outputs = result_contents[1].outputs @@ -1617,7 +1615,7 @@ def test_parse_response_from_openai_image_generation_format_detection(): mock_response_webp.output = [mock_item_webp] with patch.object(client, "_get_metadata_from_response", return_value={}): - response_webp = client._parse_response_from_openai(mock_response_webp, chat_options=ChatOptions()) # type: ignore + response_webp = client._parse_response_from_openai(mock_response_webp, options={}) # type: 
ignore outputs_webp = response_webp.messages[0].contents[1].outputs assert outputs_webp and isinstance(outputs_webp, DataContent) assert outputs_webp.media_type == "image/webp" @@ -1647,7 +1645,7 @@ def test_parse_response_from_openai_image_generation_fallback(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore # Verify it falls back to PNG format for unrecognized binary data assert len(response.messages[0].contents) == 2 @@ -1684,7 +1682,7 @@ async def test_prepare_options_store_parameter_handling() -> None: assert "previous_response_id" not in options -def test_openai_responses_client_with_callable_api_key() -> None: +def test_with_callable_api_key() -> None: """Test OpenAIResponsesClient initialization with callable API key.""" async def get_api_key() -> str: @@ -1698,278 +1696,189 @@ async def get_api_key() -> str: assert client.client is not None -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_response() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() - - assert isinstance(openai_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = await openai_responses_client.get_response(messages=messages) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "scientists" in response.text - - messages.clear() - messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - - # Test that the client can be used to get a response - response = await openai_responses_client.get_response( - messages=messages, - response_format=OutputStruct, - ) - - assert response is not None - assert isinstance(response, ChatResponse) - output = response.value - assert output is not None, "Response value is None" - assert "seattle" in output.location.lower() - assert output.weather is not None +# region Integration Tests @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_response_tools() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() - - assert isinstance(openai_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What is the weather in New York?")) - - # Test that the client can be used to get a response - response = await openai_responses_client.get_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - ) - - assert response is not None - assert isinstance(response, ChatResponse) - assert "sunny" in response.text.lower() - - messages.clear() - messages.append(ChatMessage(role="user", text="What 
is the weather in Seattle?")) - - # Test that the client can be used to get a response - response = await openai_responses_client.get_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - response_format=OutputStruct, - ) - - assert response is not None - assert isinstance(response, ChatResponse) - output = OutputStruct.model_validate_json(response.text) - assert "seattle" in output.location.lower() - assert "sunny" in output.weather.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_streaming() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() - - assert isinstance(openai_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) - - # Test that the client can be used to get a response - response = await ChatResponse.from_chat_response_generator( - openai_responses_client.get_streaming_response(messages=messages) - ) - - assert "scientists" in response.text - - messages.clear() - messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - - response = openai_responses_client.get_streaming_response( - messages=messages, - response_format=OutputStruct, - ) - chunks = [] - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - chunks.append(chunk) - full_message = ChatResponse.from_chat_response_updates(chunks, output_format_type=OutputStruct) - output = full_message.value - assert output is not None, "Response value is None" - assert "seattle" in output.location.lower() - assert output.weather is not None - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_streaming_tools() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() - - assert isinstance(openai_responses_client, ChatClientProtocol) - - messages: list[ChatMessage] = [ChatMessage(role="user", text="What is the weather in Seattle?")] - - # Test that the client can be used to get a response - response = openai_responses_client.get_streaming_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - ) - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - - assert "sunny" in full_message.lower() - - messages.clear() - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - - response = openai_responses_client.get_streaming_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - response_format=OutputStruct, - ) - chunks = [] - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - chunks.append(chunk) - - full_message = ChatResponse.from_chat_response_updates(chunks, 
output_format_type=OutputStruct) - output = full_message.value - assert output is not None, "Response value is None" - assert "seattle" in output.location.lower() - assert "sunny" in output.weather.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_web_search() -> None: +@pytest.mark.parametrize( + "option_name,option_value,needs_validation", + [ + # Simple ChatOptions - just verify they don't fail + param("temperature", 0.7, False, id="temperature"), + param("top_p", 0.9, False, id="top_p"), + param("max_tokens", 500, False, id="max_tokens"), + param("seed", 123, False, id="seed"), + param("user", "test-user-id", False, id="user"), + param("metadata", {"test_key": "test_value"}, False, id="metadata"), + param("frequency_penalty", 0.5, False, id="frequency_penalty"), + param("presence_penalty", 0.3, False, id="presence_penalty"), + param("stop", ["END"], False, id="stop"), + param("allow_multiple_tool_calls", True, False, id="allow_multiple_tool_calls"), + param("tool_choice", "none", True, id="tool_choice_none"), + # OpenAIResponsesOptions - just verify they don't fail + param("safety_identifier", "user-hash-abc123", False, id="safety_identifier"), + param("truncation", "auto", False, id="truncation"), + param("top_logprobs", 5, False, id="top_logprobs"), + param("prompt_cache_key", "test-cache-key", False, id="prompt_cache_key"), + param("max_tool_calls", 3, False, id="max_tool_calls"), + # Complex options requiring output validation + param("tools", [get_weather], True, id="tools_function"), + param("tool_choice", "auto", True, id="tool_choice_auto"), + param("tool_choice", "required", True, id="tool_choice_required_any"), + param( + "tool_choice", + {"mode": "required", "required_function_name": "get_weather"}, + True, + id="tool_choice_required", + ), + param("response_format", OutputStruct, True, id="response_format_pydantic"), + param( + "response_format", + { + "type": "json_schema", + "json_schema": { + "name": "WeatherDigest", + "strict": True, + "schema": { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + }, + }, + }, + True, + id="response_format_runtime_json_schema", + ), + ], +) +async def test_integration_options( + option_name: str, + option_value: Any, + needs_validation: bool, +) -> None: + """Parametrized test covering all ChatOptions and OpenAIResponsesOptions. + + Tests both streaming and non-streaming modes for each option to ensure + they don't cause failures. Options marked with needs_validation also + check that the feature actually works correctly. 
+ """ openai_responses_client = OpenAIResponsesClient() + # to ensure toolmode required does not endlessly loop + openai_responses_client.function_invocation_configuration.max_iterations = 1 + + for streaming in [False, True]: + # Prepare test message + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Use weather-related prompt for tool tests + messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + elif option_name.startswith("response_format"): + # Use prompt that works well with structured output + messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + else: + # Generic prompt for simple options + messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + + # Build options dict + options: dict[str, Any] = {option_name: option_value} + + # Add tools if testing tool_choice to avoid errors + if option_name.startswith("tool_choice"): + options["tools"] = [get_weather] + + if streaming: + # Test streaming mode + response_gen = openai_responses_client.get_streaming_response( + messages=messages, + options=options, + ) - assert isinstance(openai_responses_client, ChatClientProtocol) - - # Test that the client will use the web search tool - response = await openai_responses_client.get_response( - messages=[ - ChatMessage( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + output_format = option_value if option_name.startswith("response_format") else None + response = await ChatResponse.from_chat_response_generator(response_gen, output_format_type=output_format) + else: + # Test non-streaming mode + response = await openai_responses_client.get_response( + messages=messages, + options=options, ) - ], - tools=[HostedWebSearchTool()], - tool_choice="auto", - ) - assert response is not None - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text - - # Test that the client will use the web search tool with location - additional_properties = { - "user_location": { - "country": "US", - "city": "Seattle", - } - } - response = await openai_responses_client.get_response( - messages=[ChatMessage(role="user", text="What is the current weather? Do not ask for my current location.")], - tools=[HostedWebSearchTool(additional_properties=additional_properties)], - tool_choice="auto", - ) - assert response.text is not None + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None, f"No text in response for option '{option_name}'" + assert len(response.text) > 0, f"Empty response for option '{option_name}'" + + # Validate based on option type + if needs_validation: + if option_name.startswith("tools") or option_name.startswith("tool_choice"): + # Should have called the weather function + text = response.text.lower() + assert "sunny" in text or "seattle" in text, f"Tool not invoked for {option_name}" + elif option_name.startswith("response_format"): + if option_value == OutputStruct: + # Should have structured output + assert response.value is not None, "No structured output" + assert isinstance(response.value, OutputStruct) + assert "seattle" in response.value.location.lower() + else: + # Runtime JSON schema + assert response.value is None, "No structured output, can't parse any json." 
+ response_value = json.loads(response.text) + assert isinstance(response_value, dict) + assert "location" in response_value + assert "seattle" in response_value["location"].lower() @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_web_search_streaming() -> None: - openai_responses_client = OpenAIResponsesClient() - - assert isinstance(openai_responses_client, ChatClientProtocol) - - # Test that the client will use the web search tool - response = openai_responses_client.get_streaming_response( - messages=[ - ChatMessage( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - tools=[HostedWebSearchTool()], - tool_choice="auto", - ) - - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - assert "Rumi" in full_message - assert "Mira" in full_message - assert "Zoey" in full_message - - # Test that the client will use the web search tool with location - additional_properties = { - "user_location": { - "country": "US", - "city": "Seattle", +async def test_integration_web_search() -> None: + client = OpenAIResponsesClient(model_id="gpt-5") + + for streaming in [False, True]: + content = { + "messages": "Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool()], + }, } - } - response = openai_responses_client.get_streaming_response( - messages=[ChatMessage(role="user", text="What is the current weather? Do not ask for my current location.")], - tools=[HostedWebSearchTool(additional_properties=additional_properties)], - tool_choice="auto", - ) - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - assert full_message is not None + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text + + # Test that the client will use the web search tool with location + additional_properties = { + "user_location": { + "country": "US", + "city": "Seattle", + } + } + content = { + "messages": "What is the current weather? 
Do not ask for my current location.", + "options": { + "tool_choice": "auto", + "tools": [HostedWebSearchTool(additional_properties=additional_properties)], + }, + } + if streaming: + response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content)) + else: + response = await client.get_response(**content) + assert response.text is not None @pytest.mark.skip( @@ -1978,7 +1887,7 @@ async def test_openai_responses_client_web_search_streaming() -> None: ) @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_file_search() -> None: +async def test_integration_file_search() -> None: openai_responses_client = OpenAIResponsesClient() assert isinstance(openai_responses_client, ChatClientProtocol) @@ -1992,8 +1901,10 @@ async def test_openai_responses_client_file_search() -> None: text="What is the weather today? Do a file search to find the answer.", ) ], - tools=[HostedFileSearchTool(inputs=vector_store)], - tool_choice="auto", + options={ + "tool_choice": "auto", + "tools": [HostedFileSearchTool(inputs=vector_store)], + }, ) await delete_vector_store(openai_responses_client, file_id, vector_store.vector_store_id) @@ -2007,7 +1918,7 @@ async def test_openai_responses_client_file_search() -> None: ) @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_streaming_file_search() -> None: +async def test_integration_streaming_file_search() -> None: openai_responses_client = OpenAIResponsesClient() assert isinstance(openai_responses_client, ChatClientProtocol) @@ -2021,8 +1932,10 @@ async def test_openai_responses_client_streaming_file_search() -> None: text="What is the weather today? Do a file search to find the answer.", ) ], - tools=[HostedFileSearchTool(inputs=vector_store)], - tool_choice="auto", + options={ + "tool_choice": "auto", + "tools": [HostedFileSearchTool(inputs=vector_store)], + }, ) assert response is not None @@ -2038,435 +1951,3 @@ async def test_openai_responses_client_streaming_file_search() -> None: assert "sunny" in full_message.lower() assert "75" in full_message - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_basic_run(): - """Test OpenAI Responses Client agent basic run functionality with OpenAIResponsesClient.""" - agent = OpenAIResponsesClient().create_agent( - instructions="You are a helpful assistant.", - ) - - # Test basic run - response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - assert "hello world" in response.text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_basic_run_streaming(): - """Test OpenAI Responses Client agent basic streaming functionality with OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - ) as agent: - # Test streaming run - full_text = "" - async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): - assert isinstance(chunk, AgentRunResponseUpdate) - if chunk.text: - full_text += chunk.text - - assert len(full_text) > 0 - assert "streaming response test" in full_text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_thread_persistence(): - """Test OpenAI Responses Client agent thread persistence across runs with OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as agent: - # Create a new thread that will be reused - thread = agent.get_new_thread() - - # First interaction - first_response = await agent.run("My favorite programming language is Python. Remember this.", thread=thread) - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - - # Second interaction - test memory - second_response = await agent.run("What is my favorite programming language?", thread=thread) - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_thread_storage_with_store_true(): - """Test OpenAI Responses Client agent with store=True to verify service_thread_id is returned.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - # Create a new thread - thread = AgentThread() - - # Initially, service_thread_id should be None - assert thread.service_thread_id is None - - # Run with store=True to store messages on OpenAI side - response = await agent.run( - "Hello! Please remember that my name is Alex.", - thread=thread, - store=True, - ) - - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - - # After store=True, service_thread_id should be populated - assert thread.service_thread_id is not None - assert isinstance(thread.service_thread_id, str) - assert len(thread.service_thread_id) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_existing_thread(): - """Test OpenAI Responses Client agent with existing thread to continue conversations across agent instances.""" - # First conversation - capture the thread - preserved_thread = None - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as first_agent: - # Start a conversation and capture the thread - thread = first_agent.get_new_thread() - first_response = await first_agent.run("My hobby is photography. 
Remember this.", thread=thread) - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - - # Preserve the thread for reuse - preserved_thread = thread - - # Second conversation - reuse the thread in a new agent instance - if preserved_thread: - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as second_agent: - # Reuse the preserved thread - second_response = await second_agent.run("What is my hobby?", thread=preserved_thread) - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - assert "photography" in second_response.text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_hosted_code_interpreter_tool(): - """Test OpenAI Responses Client agent with HostedCodeInterpreterTool through OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can execute Python code.", - tools=[HostedCodeInterpreterTool()], - ) as agent: - # Test code interpreter functionality - response = await agent.run("Calculate the sum of numbers from 1 to 10 using Python code.") - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain calculation result (sum of 1-10 = 55) or code execution content - contains_relevant_content = any( - term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] - ) - assert contains_relevant_content or len(response.text.strip()) > 10 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_image_generation_tool(): - """Test OpenAI Responses Client agent with raw image_generation tool through OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can generate images.", - tools=HostedImageGenerationTool(options={"image_size": "1024x1024", "media_type": "png"}), - ) as agent: - # Test image generation functionality - response = await agent.run("Generate an image of a cute red panda sitting on a tree branch in a forest.") - - assert isinstance(response, AgentRunResponse) - assert response.messages - - # Verify we got image content - look for ImageGenerationToolResultContent - image_content_found = False - for message in response.messages: - for content in message.contents: - if content.type == "image_generation_tool_result" and content.outputs: - image_content_found = True - break - if image_content_found: - break - - # The test passes if we got image content - assert image_content_found, "Expected to find image content in response" - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_level_tool_persistence(): - """Test that agent-level tools persist across multiple runs with OpenAI Responses Client.""" - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that uses available tools.", - tools=[get_weather], # Agent-level tool - ) as agent: - # First run - agent-level tool should be available - first_response = await agent.run("What's the weather like in Chicago?") - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the agent-level weather tool - assert 
any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - agent-level tool should still be available (persistence test) - second_response = await agent.run("What's the weather in Miami?") - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should use the agent-level weather tool again - assert any(term in second_response.text.lower() for term in ["miami", "sunny", "72"]) - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_run_level_tool_isolation(): - """Test that run-level tools are isolated to specific runs and don't persist with OpenAI Responses Client.""" - # Counter to track how many times the weather tool is called - call_count = 0 - - @ai_function - async def get_weather_with_counter(location: Annotated[str, "The location as a city name"]) -> str: - """Get the current weather in a given location.""" - nonlocal call_count - call_count += 1 - return f"The weather in {location} is sunny and 72°F." - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - # First run - use run-level tool - first_response = await agent.run( - "What's the weather like in Chicago?", - tools=[get_weather_with_counter], # Run-level tool - ) - - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the run-level weather tool (call count should be 1) - assert call_count == 1 - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - run-level tool should NOT persist (key isolation test) - second_response = await agent.run("What's the weather like in Miami?") - - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should NOT use the weather tool since it was only run-level in previous call - # Call count should still be 1 (no additional calls) - assert call_count == 1 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_chat_options_run_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - response = await agent.run( - "Provide a brief, helpful response about why the sky blue is.", - max_tokens=600, - model_id="gpt-4o", - user="comprehensive-test-user", - tools=[get_weather], - tool_choice="auto", - ) - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_chat_options_agent_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, - user="comprehensive-test-user", - tools=[get_weather], - tool_choice="auto", - ) as agent: - response = await agent.run( - "Provide a brief, helpful response.", - ) - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def 
test_openai_responses_client_agent_hosted_mcp_tool() -> None: - """Integration test for HostedMCPTool with OpenAI Response Agent using Microsoft Learn MCP.""" - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=HostedMCPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - description="A Microsoft Learn MCP server for documentation questions", - approval_mode="never_require", - ), - ) as agent: - response = await agent.run( - "How to create an Azure storage account using az cli?", - # this needs to be high enough to handle the full MCP tool response. - max_tokens=5000, - ) - - assert isinstance(response, AgentRunResponse) - assert response.text - # Should contain Azure-related content since it's asking about Azure CLI - assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_local_mcp_tool() -> None: - """Integration test for MCPStreamableHTTPTool with OpenAI Response Agent using Microsoft Learn MCP.""" - - mcp_tool = MCPStreamableHTTPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - ) - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=[mcp_tool], - ) as agent: - response = await agent.run( - "How to create an Azure storage account using az cli?", - max_tokens=200, - ) - - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain Azure-related content since it's asking about Azure CLI - assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) - - -class ReleaseBrief(BaseModel): - """Structured output model for release brief testing.""" - - title: str - summary: str - highlights: list[str] - model_config = {"extra": "forbid"} - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_with_response_format_pydantic() -> None: - """Integration test for response_format with Pydantic model using OpenAI Responses Client.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that returns structured JSON responses.", - ) as agent: - response = await agent.run( - "Summarize the following release notes into a ReleaseBrief:\n\n" - "Version 2.0 Release Notes:\n" - "- Added new streaming API for real-time responses\n" - "- Improved error handling with detailed messages\n" - "- Performance boost of 50% in batch processing\n" - "- Fixed memory leak in connection pooling", - response_format=ReleaseBrief, - ) - - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.value is not None - assert isinstance(response.value, ReleaseBrief) - - # Validate structured output fields - brief = response.value - assert len(brief.title) > 0 - assert len(brief.summary) > 0 - assert len(brief.highlights) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_with_runtime_json_schema() -> None: - """Integration test for response_format with runtime JSON schema using OpenAI Responses Client.""" - runtime_schema = { - "title": "WeatherDigest", - "type": "object", - 
"properties": { - "location": {"type": "string"}, - "conditions": {"type": "string"}, - "temperature_c": {"type": "number"}, - "advisory": {"type": "string"}, - }, - "required": ["location", "conditions", "temperature_c", "advisory"], - "additionalProperties": False, - } - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="Return only JSON that matches the provided schema. Do not add commentary.", - ) as agent: - response = await agent.run( - "Give a brief weather digest for Seattle.", - additional_chat_options={ - "response_format": { - "type": "json_schema", - "json_schema": { - "name": runtime_schema["title"], - "strict": True, - "schema": runtime_schema, - }, - }, - }, - ) - - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.text is not None - - # Parse JSON and validate structure - import json - - parsed = json.loads(response.text) - assert "location" in parsed - assert "conditions" in parsed - assert "temperature_c" in parsed - assert "advisory" in parsed diff --git a/python/packages/core/tests/workflow/test_group_chat.py b/python/packages/core/tests/workflow/test_group_chat.py index a99af64102..b66900dad7 100644 --- a/python/packages/core/tests/workflow/test_group_chat.py +++ b/python/packages/core/tests/workflow/test_group_chat.py @@ -785,13 +785,13 @@ async def test_set_manager_configures_response_format(self) -> None: chat_client = MagicMock() manager_agent = ChatAgent(chat_client=chat_client, name="Coordinator") - assert manager_agent.chat_options.response_format is None + assert manager_agent.default_options.get("response_format") is None worker = StubAgent("worker", "response") builder = GroupChatBuilder().set_manager(manager_agent).participants([worker]) - assert manager_agent.chat_options.response_format is ManagerSelectionResponse + assert manager_agent.default_options.get("response_format") is ManagerSelectionResponse assert builder._manager_participant is manager_agent # type: ignore[attr-defined] async def test_set_manager_accepts_agent_manager(self) -> None: @@ -820,13 +820,15 @@ class CustomResponse(BaseModel): value: str chat_client = MagicMock() - manager_agent = ChatAgent(chat_client=chat_client, name="Coordinator", response_format=CustomResponse) + manager_agent = ChatAgent( + chat_client=chat_client, name="Coordinator", default_options={"response_format": CustomResponse} + ) worker = StubAgent("worker", "response") with pytest.raises(ValueError, match="response_format must be ManagerSelectionResponse"): GroupChatBuilder().set_manager(manager_agent).participants([worker]) - assert manager_agent.chat_options.response_format is CustomResponse + assert manager_agent.default_options.get("response_format") is CustomResponse class TestFactoryFunctions: diff --git a/python/packages/core/tests/workflow/test_handoff.py b/python/packages/core/tests/workflow/test_handoff.py index 1daeef3943..c39cc3ef08 100644 --- a/python/packages/core/tests/workflow/test_handoff.py +++ b/python/packages/core/tests/workflow/test_handoff.py @@ -504,8 +504,8 @@ def sample_function() -> str: assert hasattr(cloned_agent, "_local_mcp_tools") assert len(cloned_agent._local_mcp_tools) == 1 # type: ignore[reportPrivateUsage] assert cloned_agent._local_mcp_tools[0] == mock_mcp_tool # type: ignore[reportPrivateUsage] - assert cloned_agent.chat_options.tools is not None - assert len(cloned_agent.chat_options.tools) == 1 + assert cloned_agent.default_options.get("tools") is not None + assert len(cloned_agent.default_options.get("tools")) == 
1
 
 
 async def test_return_to_previous_routing():
@@ -658,15 +658,14 @@ async def test_tool_choice_preserved_from_agent_config():
     """Verify that agent-level tool_choice configuration is preserved and not overridden."""
     from unittest.mock import AsyncMock
 
-    from agent_framework import ChatResponse, ToolMode
+    from agent_framework import ChatResponse
 
     # Create a mock chat client that records the tool_choice used
     recorded_tool_choices: list[Any] = []
 
-    async def mock_get_response(messages: Any, **kwargs: Any) -> ChatResponse:
-        chat_options = kwargs.get("chat_options")
-        if chat_options:
-            recorded_tool_choices.append(chat_options.tool_choice)
+    async def mock_get_response(messages: Any, options: dict[str, Any] | None = None, **kwargs: Any) -> ChatResponse:
+        if options:
+            recorded_tool_choices.append(options.get("tool_choice"))
         return ChatResponse(
             messages=[ChatMessage(role=Role.ASSISTANT, text="Response")],
             response_id="test_response",
@@ -675,11 +674,11 @@ async def mock_get_response(messages: Any, **kwargs: Any) -> ChatResponse:
 
     mock_client = MagicMock()
     mock_client.get_response = AsyncMock(side_effect=mock_get_response)
 
-    # Create agent with specific tool_choice configuration
+    # Create agent with specific tool_choice configuration via default_options
     agent = ChatAgent(
         chat_client=mock_client,
         name="test_agent",
-        tool_choice=ToolMode(mode="required"),  # type: ignore[arg-type]
+        default_options={"tool_choice": {"mode": "required"}},
     )
 
     # Run the agent
@@ -689,7 +688,7 @@ async def mock_get_response(messages: Any, **kwargs: Any) -> ChatResponse:
     assert len(recorded_tool_choices) > 0, "No tool_choice recorded"
     last_tool_choice = recorded_tool_choices[-1]
     assert last_tool_choice is not None, "tool_choice should not be None"
-    assert str(last_tool_choice) == "required", f"Expected 'required', got {last_tool_choice}"
+    assert last_tool_choice == {"mode": "required"}, f"Expected {{'mode': 'required'}}, got {last_tool_choice}"
 
 
 async def test_handoff_builder_with_request_info():
diff --git a/python/packages/declarative/agent_framework_declarative/_models.py b/python/packages/declarative/agent_framework_declarative/_models.py
index 01ba94dd56..0132590a1c 100644
--- a/python/packages/declarative/agent_framework_declarative/_models.py
+++ b/python/packages/declarative/agent_framework_declarative/_models.py
@@ -1,6 +1,5 @@
 # Copyright (c) Microsoft. All rights reserved.
import os -import sys from collections.abc import MutableMapping from contextvars import ContextVar from typing import Any, Literal, TypeVar, Union @@ -17,10 +16,7 @@ # RuntimeError: .NET runtime not available or misconfigured engine = None -if sys.version_info >= (3, 11): - from typing import overload # pragma: no cover -else: - from typing_extensions import overload # pragma: no cover +from typing import overload logger = get_logger("agent_framework.declarative") diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index 46169481f2..ed60a402e1 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -883,10 +883,14 @@ async def _extract_tools_from_object(self, obj: Any, obj_type: str) -> list[str] try: if obj_type == "agent": - # For agents, check chat_options.tools first - chat_options = getattr(obj, "chat_options", None) - if chat_options and hasattr(chat_options, "tools"): - for tool in chat_options.tools: + # For agents, check default_options.get("tools") + chat_options = getattr(obj, "default_options", None) + chat_options_tools = None + if chat_options: + chat_options_tools = chat_options.get("tools") + + if chat_options_tools: + for tool in chat_options_tools: if hasattr(tool, "__name__"): tools.append(tool.__name__) elif hasattr(tool, "name"): diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index 5b0e218f7a..24cdc9c073 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -37,17 +37,27 @@ def extract_agent_metadata(entity_object: Any) -> dict[str, Any]: } # Try to get instructions - if hasattr(entity_object, "chat_options") and hasattr(entity_object.chat_options, "instructions"): - metadata["instructions"] = entity_object.chat_options.instructions - - # Try to get model - check both chat_options and chat_client + if hasattr(entity_object, "default_options"): + chat_opts = entity_object.default_options + if isinstance(chat_opts, dict): + if "instructions" in chat_opts: + metadata["instructions"] = chat_opts.get("instructions") + elif hasattr(chat_opts, "instructions"): + metadata["instructions"] = chat_opts.instructions + + # Try to get model - check both default_options and chat_client + if hasattr(entity_object, "default_options"): + chat_opts = entity_object.default_options + if isinstance(chat_opts, dict): + if chat_opts.get("model_id"): + metadata["model"] = chat_opts.get("model_id") + elif hasattr(chat_opts, "model_id") and chat_opts.model_id: + metadata["model"] = chat_opts.model_id if ( - hasattr(entity_object, "chat_options") - and hasattr(entity_object.chat_options, "model_id") - and entity_object.chat_options.model_id + metadata["model"] is None + and hasattr(entity_object, "chat_client") + and hasattr(entity_object.chat_client, "model_id") ): - metadata["model"] = entity_object.chat_options.model_id - elif hasattr(entity_object, "chat_client") and hasattr(entity_object.chat_client, "model_id"): metadata["model"] = entity_object.chat_client.model_id # Try to get chat client type diff --git a/python/packages/devui/tests/test_helpers.py b/python/packages/devui/tests/test_helpers.py index ebb03c4c15..49ae59bf11 100644 --- a/python/packages/devui/tests/test_helpers.py +++ b/python/packages/devui/tests/test_helpers.py @@ -13,8 +13,9 @@ to avoid pytest plugin conflicts 
when running tests across packages. """ +import sys from collections.abc import AsyncIterable, MutableSequence -from typing import Any +from typing import Any, Generic from agent_framework import ( AgentRunResponse, @@ -24,7 +25,6 @@ BaseChatClient, ChatAgent, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, ConcurrentBuilder, @@ -35,8 +35,14 @@ TextContent, use_chat_middleware, ) +from agent_framework._clients import TOptions_co from agent_framework._workflows._agent_executor import AgentExecutorResponse +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + # Import real workflow event classes - NOT mocks! from agent_framework._workflows._events import ( ExecutorCompletedEvent, @@ -91,7 +97,7 @@ async def get_streaming_response( @use_chat_middleware -class MockBaseChatClient(BaseChatClient): +class MockBaseChatClient(BaseChatClient[TOptions_co], Generic[TOptions_co]): """Full BaseChatClient mock with middleware support. Use this when testing features that require the full BaseChatClient interface. @@ -106,11 +112,12 @@ def __init__(self, **kwargs: Any): self.call_count: int = 0 self.received_messages: list[list[ChatMessage]] = [] + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: self.call_count += 1 @@ -119,11 +126,12 @@ async def _inner_get_response( return self.run_responses.pop(0) return ChatResponse(messages=ChatMessage(role="assistant", text="Mock response from ChatAgent")) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: self.call_count += 1 diff --git a/python/packages/foundry_local/agent_framework_foundry_local/__init__.py b/python/packages/foundry_local/agent_framework_foundry_local/__init__.py index dbea932348..d271839cd7 100644 --- a/python/packages/foundry_local/agent_framework_foundry_local/__init__.py +++ b/python/packages/foundry_local/agent_framework_foundry_local/__init__.py @@ -2,7 +2,7 @@ import importlib.metadata -from ._foundry_local_client import FoundryLocalClient +from ._foundry_local_client import FoundryLocalChatOptions, FoundryLocalClient, FoundryLocalSettings try: __version__ = importlib.metadata.version(__name__) @@ -10,6 +10,8 @@ __version__ = "0.0.0" # Fallback for development mode __all__ = [ + "FoundryLocalChatOptions", "FoundryLocalClient", + "FoundryLocalSettings", "__version__", ] diff --git a/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py b/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py index c2b7bd34ab..1cbfde6f38 100644 --- a/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py +++ b/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py @@ -1,8 +1,9 @@ # Copyright (c) Microsoft. All rights reserved. 
-from typing import Any, ClassVar +import sys +from typing import Any, ClassVar, Generic, TypedDict -from agent_framework import use_chat_middleware, use_function_invocation +from agent_framework import ChatOptions, use_chat_middleware, use_function_invocation from agent_framework._pydantic import AFBaseSettings from agent_framework.exceptions import ServiceInitializationError from agent_framework.observability import use_instrumentation @@ -11,11 +12,93 @@ from foundry_local.models import DeviceType from openai import AsyncOpenAI +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover + + __all__ = [ + "FoundryLocalChatOptions", "FoundryLocalClient", + "FoundryLocalSettings", ] +# region Foundry Local Chat Options TypedDict + + +class FoundryLocalChatOptions(ChatOptions, total=False): + """Azure Foundry Local (local model deployment) chat options dict. + + Extends base ChatOptions for local model inference via Foundry Local. + Foundry Local provides an OpenAI-compatible API, so most standard + OpenAI chat completion options are supported. + + See: https://github.com/Azure/azure-ai-foundry-model-inference + + Keys: + # Inherited from ChatOptions (supported via OpenAI-compatible API): + model_id: The model identifier or alias (e.g., 'phi-4-mini'). + temperature: Sampling temperature (0-2). + top_p: Nucleus sampling parameter. + max_tokens: Maximum tokens to generate. + stop: Stop sequences. + tools: List of tools available to the model. + tool_choice: How the model should use tools. + frequency_penalty: Frequency penalty (-2.0 to 2.0). + presence_penalty: Presence penalty (-2.0 to 2.0). + seed: Random seed for reproducibility. + + # Options with limited support (depends on the model): + response_format: Response format specification. + Not all local models support JSON mode. + logit_bias: Token bias dictionary. + May not be supported by all models. + + # Options not supported in Foundry Local: + user: Not used locally. + store: Not applicable for local inference. + metadata: Not applicable for local inference. + + # Foundry Local-specific options: + extra_body: Additional request body parameters to pass to the model. + Can be used for model-specific options not covered by standard API. + + Note: + The actual options supported depend on the specific model being used. + Some models (like Phi-4) may not support all OpenAI API features. + Options not supported by the model will typically be ignored. + """ + + # Foundry Local-specific options + extra_body: dict[str, Any] + """Additional request body parameters for model-specific options.""" + + # ChatOptions fields not applicable for local inference + user: None # type: ignore[misc] + """Not used for local model inference.""" + + store: None # type: ignore[misc] + """Not applicable for local inference.""" + + +FOUNDRY_LOCAL_OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "model", +} +"""Maps ChatOptions keys to OpenAI API parameter names (for compatibility).""" + +TFoundryLocalChatOptions = TypeVar( + "TFoundryLocalChatOptions", + bound=TypedDict, # type: ignore[valid-type] + default="FoundryLocalChatOptions", + covariant=True, +) + + +# endregion + + class FoundryLocalSettings(AFBaseSettings): """Foundry local model settings. 
@@ -40,7 +123,7 @@ class FoundryLocalSettings(AFBaseSettings): @use_function_invocation @use_instrumentation @use_chat_middleware -class FoundryLocalClient(OpenAIBaseChatClient): +class FoundryLocalClient(OpenAIBaseChatClient[TFoundryLocalChatOptions], Generic[TFoundryLocalChatOptions]): """Foundry Local Chat completion class.""" def __init__( @@ -125,6 +208,16 @@ def __init__( # You can also use the CLI: `foundry model load phi-4-mini --device Auto` + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework_foundry_local import FoundryLocalChatOptions + + class MyOptions(FoundryLocalChatOptions, total=False): + my_custom_option: str + + client: FoundryLocalClient[MyOptions] = FoundryLocalClient(model_id="phi-4-mini") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) + Raises: ServiceInitializationError: If the specified model ID or alias is not found. Sometimes a model might be available but if you have specified a device diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py index ae01082ca3..ad5b3416fe 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py @@ -3,18 +3,20 @@ import uuid from typing import cast -from agent_framework._agents import ChatAgent -from agent_framework._types import AgentRunResponse, ChatMessage, Role -from agent_framework._workflows import ( +from agent_framework import ( AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, + AgentRunResponse, + ChatAgent, + ChatClientProtocol, + ChatMessage, FunctionExecutor, + Role, Workflow, WorkflowBuilder, WorkflowContext, ) -from agent_framework.openai import OpenAIChatClient from loguru import logger from tau2.data_model.simulation import SimulationRun, TerminationReason # type: ignore[import-untyped] from tau2.data_model.tasks import Task # type: ignore[import-untyped] @@ -156,7 +158,7 @@ def _is_user_stop(self, text: str) -> bool: """Check if user wants to stop the conversation.""" return STOP in text or TRANSFER in text or OUT_OF_SCOPE in text - def assistant_agent(self, assistant_chat_client: OpenAIChatClient) -> ChatAgent: + def assistant_agent(self, assistant_chat_client: ChatClientProtocol) -> ChatAgent: """Create an assistant agent. Users can override this method to provide a custom assistant agent. @@ -205,7 +207,7 @@ def assistant_agent(self, assistant_chat_client: OpenAIChatClient) -> ChatAgent: ), ) - def user_simulator(self, user_simuator_chat_client: OpenAIChatClient, task: Task) -> ChatAgent: + def user_simulator(self, user_simuator_chat_client: ChatClientProtocol, task: Task) -> ChatAgent: """Create a user simulator agent. Users can override this method to provide a custom user simulator agent. @@ -301,8 +303,8 @@ def build_conversation_workflow(self, assistant_agent: ChatAgent, user_simulator async def run( self, task: Task, - assistant_chat_client: OpenAIChatClient, - user_simuator_chat_client: OpenAIChatClient, + assistant_chat_client: ChatClientProtocol, + user_simulator_chat_client: ChatClientProtocol, ) -> list[ChatMessage]: """Run a tau2 task using workflow-based agent orchestration. 
@@ -317,18 +319,18 @@ async def run( Args: task: Tau2 task containing scenario, policy, and evaluation criteria assistant_chat_client: LLM client for the assistant agent - user_simuator_chat_client: LLM client for the user simulator + user_simulator_chat_client: LLM client for the user simulator Returns: Complete conversation history as ChatMessage list for evaluation """ logger.info(f"Starting workflow agent for task {task.id}: {task.description.purpose}") # type: ignore[unused-ignore] logger.info(f"Assistant chat client: {assistant_chat_client}") - logger.info(f"User simulator chat client: {user_simuator_chat_client}") + logger.info(f"User simulator chat client: {user_simulator_chat_client}") # STEP 1: Create agents assistant_agent = self.assistant_agent(assistant_chat_client) - user_simulator_agent = self.user_simulator(user_simuator_chat_client, task) + user_simulator_agent = self.user_simulator(user_simulator_chat_client, task) # STEP 2: Create the conversation workflow workflow = self.build_conversation_workflow(assistant_agent, user_simulator_agent) diff --git a/python/packages/mem0/agent_framework_mem0/_provider.py b/python/packages/mem0/agent_framework_mem0/_provider.py index 48e508f411..e34c2cf435 100644 --- a/python/packages/mem0/agent_framework_mem0/_provider.py +++ b/python/packages/mem0/agent_framework_mem0/_provider.py @@ -3,22 +3,22 @@ import sys from collections.abc import MutableSequence, Sequence from contextlib import AbstractAsyncContextManager -from typing import Any +from typing import Any, TypedDict from agent_framework import ChatMessage, Context, ContextProvider from agent_framework.exceptions import ServiceInitializationError from mem0 import AsyncMemory, AsyncMemoryClient -if sys.version_info >= (3, 11): - from typing import NotRequired, Self, TypedDict # pragma: no cover -else: - from typing_extensions import NotRequired, Self, TypedDict # pragma: no cover - if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore[import] # pragma: no cover +if sys.version_info >= (3, 11): + from typing import NotRequired, Self # pragma: no cover +else: + from typing_extensions import NotRequired, Self # pragma: no cover + # Type aliases for Mem0 search response formats (v1.1 and v2; v1 is deprecated, but matches the type definition for v2) class MemorySearchResponse_v1_1(TypedDict): diff --git a/python/packages/ollama/agent_framework_ollama/__init__.py b/python/packages/ollama/agent_framework_ollama/__init__.py index 969b607623..d1bd699e1a 100644 --- a/python/packages/ollama/agent_framework_ollama/__init__.py +++ b/python/packages/ollama/agent_framework_ollama/__init__.py @@ -2,7 +2,7 @@ import importlib.metadata -from ._chat_client import OllamaChatClient, OllamaSettings +from ._chat_client import OllamaChatClient, OllamaChatOptions, OllamaSettings try: __version__ = importlib.metadata.version(__name__) @@ -11,6 +11,7 @@ __all__ = [ "OllamaChatClient", + "OllamaChatOptions", "OllamaSettings", "__version__", ] diff --git a/python/packages/ollama/agent_framework_ollama/_chat_client.py b/python/packages/ollama/agent_framework_ollama/_chat_client.py index f047a5d4b3..825ee47bec 100644 --- a/python/packages/ollama/agent_framework_ollama/_chat_client.py +++ b/python/packages/ollama/agent_framework_ollama/_chat_client.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import json +import sys from collections.abc import ( AsyncIterable, Callable, @@ -10,7 +11,7 @@ Sequence, ) from itertools import chain -from typing import Any, ClassVar +from typing import Any, ClassVar, Generic, TypedDict from agent_framework import ( AIFunction, @@ -46,6 +47,229 @@ from ollama._types import Message as OllamaMessage from pydantic import ValidationError +if sys.version_info >= (3, 13): + from typing import TypeVar +else: + from typing_extensions import TypeVar + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + + +__all__ = ["OllamaChatClient", "OllamaChatOptions"] + + +# region Ollama Chat Options TypedDict + + +class OllamaChatOptions(ChatOptions, total=False): + """Ollama-specific chat options dict. + + Extends base ChatOptions with Ollama-specific parameters. + Ollama passes model parameters through the `options` field. + + See: https://github.com/ollama/ollama/blob/main/docs/api.md + + Keys: + # Inherited from ChatOptions (mapped to Ollama options): + model_id: The model name, translates to ``model`` in Ollama API. + temperature: Sampling temperature, translates to ``options.temperature``. + top_p: Nucleus sampling, translates to ``options.top_p``. + max_tokens: Maximum tokens to generate, translates to ``options.num_predict``. + stop: Stop sequences, translates to ``options.stop``. + seed: Random seed for reproducibility, translates to ``options.seed``. + frequency_penalty: Frequency penalty, translates to ``options.frequency_penalty``. + presence_penalty: Presence penalty, translates to ``options.presence_penalty``. + tools: List of function tools. + response_format: Output format, translates to ``format``. + Use 'json' for JSON mode or a JSON schema dict for structured output. + + # Options not supported in Ollama: + tool_choice: Ollama only supports auto tool choice. + allow_multiple_tool_calls: Not configurable. + user: Not supported. + store: Not supported. + logit_bias: Not supported. + metadata: Not supported. + + # Ollama model-level options (placed in `options` dict): + # See: https://github.com/ollama/ollama/blob/main/docs/modelfile.mdx#valid-parameters-and-values + num_predict: Maximum number of tokens to predict (alternative to max_tokens). + top_k: Top-k sampling: limits tokens to k most likely. Higher = more diverse. + min_p: Minimum probability threshold for token selection. + typical_p: Locally typical sampling parameter (0.0-1.0). + repeat_penalty: Penalty for repeating tokens. Higher = less repetition. + repeat_last_n: Number of tokens to consider for repeat penalty. + penalize_newline: Whether to penalize newline characters. + num_ctx: Context window size (number of tokens). + num_batch: Batch size for prompt processing. + num_keep: Number of tokens to keep from initial prompt. + num_gpu: Number of layers to offload to GPU. + main_gpu: Main GPU for computation. + use_mmap: Whether to use memory-mapped files. + num_thread: Number of threads for CPU computation. + numa: Enable NUMA optimization. + + # Ollama-specific top-level options: + keep_alive: How long to keep model loaded (default: '5m'). + think: Whether thinking models should think before responding. + + Examples: + .. 
code-block:: python + + from agent_framework_ollama import OllamaChatOptions + + # Basic usage - standard options automatically mapped + options: OllamaChatOptions = { + "temperature": 0.7, + "max_tokens": 1000, + "seed": 42, + } + + # With Ollama-specific model options + options: OllamaChatOptions = { + "top_k": 40, + "num_ctx": 4096, + "keep_alive": "10m", + } + + # With JSON output format + options: OllamaChatOptions = { + "response_format": "json", + } + + # With structured output (JSON schema) + options: OllamaChatOptions = { + "response_format": { + "type": "object", + "properties": {"answer": {"type": "string"}}, + "required": ["answer"], + }, + } + """ + + # Ollama model-level options (will be placed in `options` dict) + num_predict: int + """Maximum number of tokens to predict (equivalent to max_tokens).""" + + top_k: int + """Top-k sampling: limits tokens to k most likely. Higher = more diverse.""" + + min_p: float + """Minimum probability threshold for token selection.""" + + typical_p: float + """Locally typical sampling parameter (0.0-1.0).""" + + repeat_penalty: float + """Penalty for repeating tokens. Higher = less repetition.""" + + repeat_last_n: int + """Number of tokens to consider for repeat penalty.""" + + penalize_newline: bool + """Whether to penalize newline characters.""" + + num_ctx: int + """Context window size (number of tokens).""" + + num_batch: int + """Batch size for prompt processing.""" + + num_keep: int + """Number of tokens to keep from initial prompt.""" + + num_gpu: int + """Number of layers to offload to GPU.""" + + main_gpu: int + """Main GPU for computation.""" + + use_mmap: bool + """Whether to use memory-mapped files.""" + + num_thread: int + """Number of threads for CPU computation.""" + + numa: bool + """Enable NUMA optimization.""" + + # Ollama-specific top-level options + keep_alive: str | int + """How long to keep the model loaded in memory after request. + Can be duration string (e.g., '5m', '1h') or seconds as int. + Set to 0 to unload immediately after request.""" + + think: bool + """For thinking models: whether the model should think before responding.""" + + # ChatOptions fields not supported in Ollama + tool_choice: None # type: ignore[misc] + """Not supported. Ollama only supports auto tool choice.""" + + allow_multiple_tool_calls: None # type: ignore[misc] + """Not supported. 
Not configurable in Ollama.""" + + user: None # type: ignore[misc] + """Not supported in Ollama.""" + + store: None # type: ignore[misc] + """Not supported in Ollama.""" + + logit_bias: None # type: ignore[misc] + """Not supported in Ollama.""" + + metadata: None # type: ignore[misc] + """Not supported in Ollama.""" + + +OLLAMA_OPTION_TRANSLATIONS: dict[str, str] = { + "model_id": "model", + "response_format": "format", +} +"""Maps ChatOptions keys to Ollama API parameter names.""" + +# Keys that should be placed in the nested `options` dict for the Ollama API +OLLAMA_MODEL_OPTIONS: set[str] = { + # From ChatOptions (mapped to options.*) + "temperature", + "top_p", + "max_tokens", # -> num_predict + "stop", + "seed", + "frequency_penalty", + "presence_penalty", + # Ollama-specific model options + "num_predict", + "top_k", + "min_p", + "typical_p", + "repeat_penalty", + "repeat_last_n", + "penalize_newline", + "num_ctx", + "num_batch", + "num_keep", + "num_gpu", + "main_gpu", + "use_mmap", + "num_thread", + "numa", +} + +# Translations for options that go into the nested `options` dict +OLLAMA_MODEL_OPTION_TRANSLATIONS: dict[str, str] = { + "max_tokens": "num_predict", +} +"""Maps ChatOptions keys to Ollama model option parameter names.""" + +TOllamaChatOptions = TypeVar("TOllamaChatOptions", bound=TypedDict, default="OllamaChatOptions", covariant=True) # type: ignore[valid-type] + + +# endregion + class OllamaSettings(AFBaseSettings): """Ollama settings.""" @@ -62,7 +286,7 @@ class OllamaSettings(AFBaseSettings): @use_function_invocation @use_instrumentation @use_chat_middleware -class OllamaChatClient(BaseChatClient): +class OllamaChatClient(BaseChatClient[TOllamaChatOptions], Generic[TOllamaChatOptions]): """Ollama Chat completion class.""" OTEL_PROVIDER_NAME: ClassVar[str] = "ollama" @@ -110,15 +334,16 @@ def __init__( super().__init__(**kwargs) + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: # prepare - options_dict = self._prepare_options(messages, chat_options) + options_dict = self._prepare_options(messages, options) try: # execute @@ -133,15 +358,16 @@ async def _inner_get_response( # process return self._parse_response_from_ollama(response) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: # prepare - options_dict = self._prepare_options(messages, chat_options) + options_dict = self._prepare_options(messages, options) try: # execute @@ -157,19 +383,37 @@ async def _inner_get_streaming_response( async for part in response_object: yield self._parse_streaming_response_from_ollama(part) - def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: - # tool choice - Currently Ollama only supports auto tool choice - if chat_options.tool_choice == "required": - raise ServiceInvalidRequestError("Ollama does not support required tool choice.") - - run_options = chat_options.to_dict( - exclude={ - "type", - "instructions", - "tool_choice", # Ollama does not support tool_choice configuration - "additional_properties", # handled separately - } - ) + def _prepare_options(self, messages: MutableSequence[ChatMessage], options: dict[str, Any]) -> dict[str, Any]: + # Handle instructions by prepending to messages as system message + instructions = 
options.get("instructions") + if instructions: + from agent_framework._types import prepend_instructions_to_messages + + messages = prepend_instructions_to_messages(list(messages), instructions, role="system") + + # Keys to exclude from processing + exclude_keys = {"instructions", "tool_choice"} + + # Build run_options and model_options separately + run_options: dict[str, Any] = {} + model_options: dict[str, Any] = {} + + for key, value in options.items(): + if key in exclude_keys or value is None: + continue + + if key in OLLAMA_MODEL_OPTIONS: + # Apply model option translations (e.g., max_tokens -> num_predict) + translated_key = OLLAMA_MODEL_OPTION_TRANSLATIONS.get(key, key) + model_options[translated_key] = value + else: + # Apply top-level translations (e.g., model_id -> model) + translated_key = OLLAMA_OPTION_TRANSLATIONS.get(key, key) + run_options[translated_key] = value + + # Add model options to run_options if any + if model_options: + run_options["options"] = model_options # messages if messages and "messages" not in run_options: @@ -177,12 +421,6 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: if "messages" not in run_options: raise ServiceInvalidRequestError("Messages are required for chat completions") - # translations between ChatOptions and Ollama API - translations = {"model_id": "model"} - for old_key, new_key in translations.items(): - if old_key in run_options and old_key != new_key: - run_options[new_key] = run_options.pop(old_key) - # model id if not run_options.get("model"): if not self.model_id: @@ -190,15 +428,9 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: run_options["model"] = self.model_id # tools - if chat_options.tools and (tools := self._prepare_tools_for_ollama(chat_options.tools)): - run_options["tools"] = tools - - # additional properties - additional_options = { - key: value for key, value in chat_options.additional_properties.items() if value is not None - } - if additional_options: - run_options.update(additional_options) + tools = options.get("tools") + if tools and (prepared_tools := self._prepare_tools_for_ollama(tools)): + run_options["tools"] = prepared_tools return run_options diff --git a/python/packages/ollama/tests/test_ollama_chat_client.py b/python/packages/ollama/tests/test_ollama_chat_client.py index fbb88695c1..e2aebb2a6a 100644 --- a/python/packages/ollama/tests/test_ollama_chat_client.py +++ b/python/packages/ollama/tests/test_ollama_chat_client.py @@ -16,6 +16,7 @@ TextContent, TextReasoningContent, UriContent, + ai_function, chat_middleware, ) from agent_framework.exceptions import ( @@ -113,6 +114,7 @@ def mock_chat_completion_tool_call() -> OllamaChatResponse: ) +@ai_function def hello_world(arg1: str) -> str: return "Hello World" @@ -199,19 +201,6 @@ async def test_empty_messages() -> None: await ollama_chat_client.get_response(messages=[]) -async def test_function_choice_required_argument() -> None: - ollama_chat_client = OllamaChatClient( - host="http://localhost:12345", - model_id="test-model", - ) - with pytest.raises(ServiceInvalidRequestError): - await ollama_chat_client.get_response( - messages=[ChatMessage(text="hello world", role="user")], - tool_choice="required", - tools=[hello_world], - ) - - @patch.object(AsyncClient, "chat", new_callable=AsyncMock) async def test_cmc( mock_chat: AsyncMock, @@ -337,7 +326,7 @@ async def test_cmc_streaming_with_tool_call( chat_history.append(ChatMessage(text="hello world", role="user")) ollama_client = 
OllamaChatClient() - result = ollama_client.get_streaming_response(messages=chat_history, tools=[hello_world]) + result = ollama_client.get_streaming_response(messages=chat_history, options={"tools": [hello_world]}) chunks: list[ChatResponseUpdate] = [] async for chunk in result: @@ -373,7 +362,9 @@ async def test_cmc_with_hosted_tool_call( ollama_client = OllamaChatClient() await ollama_client.get_response( messages=chat_history, - tools=[HostedWebSearchTool(additional_properties=additional_properties)], + options={ + "tools": HostedWebSearchTool(additional_properties=additional_properties), + }, ) @@ -450,7 +441,7 @@ async def test_cmc_integration_with_tool_call( chat_history.append(ChatMessage(text="Call the hello world function and repeat what it says", role="user")) ollama_client = OllamaChatClient() - result = await ollama_client.get_response(messages=chat_history, tools=[hello_world]) + result = await ollama_client.get_response(messages=chat_history, options={"tools": [hello_world]}) assert "hello" in result.text.lower() and "world" in result.text.lower() assert isinstance(result.messages[-2].contents[0], FunctionResultContent) @@ -478,7 +469,7 @@ async def test_cmc_streaming_integration_with_tool_call( ollama_client = OllamaChatClient() result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_streaming_response( - messages=chat_history, tools=[hello_world] + messages=chat_history, options={"tools": [hello_world]} ) chunks: list[ChatResponseUpdate] = [] diff --git a/python/packages/purview/agent_framework_purview/_models.py b/python/packages/purview/agent_framework_purview/_models.py index 0ee502da1a..e4c27496a9 100644 --- a/python/packages/purview/agent_framework_purview/_models.py +++ b/python/packages/purview/agent_framework_purview/_models.py @@ -1,9 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. -"""Unified Purview model definitions and public export surface.""" - -from __future__ import annotations - from collections.abc import Mapping, MutableMapping, Sequence from datetime import datetime from enum import Enum, Flag, auto @@ -179,6 +175,8 @@ def translate_activity(activity: Activity) -> ProtectionScopeActivities: # Simple value models # -------------------------------------------------------------------------------------- +TAliasSerializable = TypeVar("TAliasSerializable", bound="_AliasSerializable") + class _AliasSerializable(SerializationMixin): """Base class adding alias mapping + pydantic-compat helpers. 
@@ -232,7 +230,7 @@ def model_dump_json(self, *, by_alias: bool = True, exclude_none: bool = True, * return json.dumps(self.model_dump(by_alias=by_alias, exclude_none=exclude_none, **kwargs)) @classmethod - def model_validate(cls, value: MutableMapping[str, Any]) -> _AliasSerializable: # type: ignore[name-defined] + def model_validate(cls: type[TAliasSerializable], value: MutableMapping[str, Any]) -> TAliasSerializable: # type: ignore[name-defined] return cls(**value) # ------------------------------------------------------------------ diff --git a/python/packages/purview/tests/test_chat_middleware.py b/python/packages/purview/tests/test_chat_middleware.py index 5633488a7e..8d414babb9 100644 --- a/python/packages/purview/tests/test_chat_middleware.py +++ b/python/packages/purview/tests/test_chat_middleware.py @@ -37,7 +37,7 @@ def chat_context(self) -> ChatContext: chat_options = MagicMock() chat_options.model = "test-model" return ChatContext( - chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], chat_options=chat_options + chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options ) async def test_initialization(self, middleware: PurviewChatPolicyMiddleware) -> None: @@ -110,7 +110,7 @@ async def test_streaming_skips_post_check(self, middleware: PurviewChatPolicyMid streaming_context = ChatContext( chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], - chat_options=chat_options, + options=chat_options, is_streaming=True, ) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: @@ -189,7 +189,7 @@ async def test_chat_middleware_handles_payment_required_pre_check(self, mock_cre chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], chat_options=chat_options + chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -215,7 +215,7 @@ async def test_chat_middleware_ignores_payment_required_when_configured(self, mo chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], chat_options=chat_options + chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -257,7 +257,7 @@ async def test_chat_middleware_with_ignore_exceptions(self, mock_credential: Asy chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], chat_options=chat_options + chat_client=chat_client, messages=[ChatMessage(role=Role.USER, text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): diff --git a/python/samples/demos/chatkit-integration/app.py b/python/samples/demos/chatkit-integration/app.py index 95d66b78c7..148a084bae 100644 --- a/python/samples/demos/chatkit-integration/app.py +++ b/python/samples/demos/chatkit-integration/app.py @@ -289,8 +289,10 @@ async def _update_thread_title( # Use the chat client directly for a quick, lightweight call response = await self.weather_agent.chat_client.get_response( messages=title_prompt, - temperature=0.3, - max_tokens=20, + options={ + "temperature": 0.3, + "max_tokens": 20, + }, ) if 
response.messages and response.messages[-1].text: diff --git a/python/samples/getting_started/agents/anthropic/anthropic_advanced.py b/python/samples/getting_started/agents/anthropic/anthropic_advanced.py index a7f4ae2656..2b727efa8b 100644 --- a/python/samples/getting_started/agents/anthropic/anthropic_advanced.py +++ b/python/samples/getting_started/agents/anthropic/anthropic_advanced.py @@ -3,7 +3,7 @@ import asyncio from agent_framework import HostedMCPTool, HostedWebSearchTool, TextReasoningContent, UsageContent -from agent_framework.anthropic import AnthropicClient +from agent_framework.anthropic import AnthropicChatOptions, AnthropicClient """ Anthropic Chat Agent Example @@ -15,9 +15,9 @@ """ -async def streaming_example() -> None: +async def main() -> None: """Example of streaming response (get results as they are generated).""" - agent = AnthropicClient().create_agent( + agent = AnthropicClient[AnthropicChatOptions]().create_agent( name="DocsAgent", instructions="You are a helpful agent for both Microsoft docs questions and general questions.", tools=[ @@ -27,10 +27,12 @@ async def streaming_example() -> None: ), HostedWebSearchTool(), ], - # anthropic needs a value for the max_tokens parameter - # we set it to 1024, but you can override like this: - max_tokens=20000, - additional_chat_options={"thinking": {"type": "enabled", "budget_tokens": 10000}}, + default_options={ + # anthropic needs a value for the max_tokens parameter + # we set it to 1024, but you can override like this: + "max_tokens": 20000, + "thinking": {"type": "enabled", "budget_tokens": 10000}, + }, ) query = "Can you compare Python decorators with C# attributes?" @@ -48,11 +50,5 @@ async def streaming_example() -> None: print("\n") -async def main() -> None: - print("=== Anthropic Example ===") - - await streaming_example() - - if __name__ == "__main__": asyncio.run(main()) diff --git a/python/samples/getting_started/agents/anthropic/anthropic_foundry.py b/python/samples/getting_started/agents/anthropic/anthropic_foundry.py index cb1b690d54..2e04dfebaa 100644 --- a/python/samples/getting_started/agents/anthropic/anthropic_foundry.py +++ b/python/samples/getting_started/agents/anthropic/anthropic_foundry.py @@ -38,10 +38,12 @@ async def main() -> None: ), HostedWebSearchTool(), ], - # anthropic needs a value for the max_tokens parameter - # we set it to 1024, but you can override like this: - max_tokens=20000, - additional_chat_options={"thinking": {"type": "enabled", "budget_tokens": 10000}}, + default_options={ + # anthropic needs a value for the max_tokens parameter + # we set it to 1024, but you can override like this: + "max_tokens": 20000, + "thinking": {"type": "enabled", "budget_tokens": 10000}, + }, ) query = "Can you compare Python decorators with C# attributes?" 
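For reference, both Anthropic samples above converge on the same pattern: agent-level defaults go in `default_options`, and an individual run can pass `options` to adjust them. The short sketch below uses only identifiers that appear in these diffs (`AnthropicClient`, `AnthropicChatOptions`, `create_agent`, `default_options`, the run-level `options` argument); the assumption that run-level values layer over the agent's defaults, and the 4096-token override, are illustrative rather than a verified API contract.

# Minimal sketch of the default_options / per-run options pattern, assuming
# run-level options take precedence over the agent-level defaults.
import asyncio

from agent_framework.anthropic import AnthropicChatOptions, AnthropicClient


async def sketch() -> None:
    agent = AnthropicClient[AnthropicChatOptions]().create_agent(
        name="DocsAgent",
        instructions="You are a helpful agent.",
        default_options={
            # Anthropic needs a value for max_tokens; set a generous agent-level default.
            "max_tokens": 20000,
            "thinking": {"type": "enabled", "budget_tokens": 10000},
        },
    )

    # A single run can narrow the defaults, e.g. a smaller max_tokens for a short answer.
    result = await agent.run(
        "Can you compare Python decorators with C# attributes?",
        options={"max_tokens": 4096},
    )
    print(result.text)


if __name__ == "__main__":
    asyncio.run(sketch())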
diff --git a/python/samples/getting_started/agents/anthropic/anthropic_skills.py b/python/samples/getting_started/agents/anthropic/anthropic_skills.py index 331b6405fb..2624a9742b 100644 --- a/python/samples/getting_started/agents/anthropic/anthropic_skills.py +++ b/python/samples/getting_started/agents/anthropic/anthropic_skills.py @@ -5,7 +5,7 @@ from pathlib import Path from agent_framework import HostedCodeInterpreterTool, HostedFileContent -from agent_framework.anthropic import AnthropicClient +from agent_framework.anthropic import AnthropicChatOptions, AnthropicClient logger = logging.getLogger(__name__) """ @@ -22,7 +22,7 @@ async def main() -> None: """Example of streaming response (get results as they are generated).""" - client = AnthropicClient(additional_beta_flags=["skills-2025-10-02"]) + client = AnthropicClient[AnthropicChatOptions](additional_beta_flags=["skills-2025-10-02"]) # List Anthropic-managed Skills skills = await client.anthropic_client.beta.skills.list(source="anthropic", betas=["skills-2025-10-02"]) @@ -35,8 +35,8 @@ async def main() -> None: name="DocsAgent", instructions="You are a helpful agent for creating powerpoint presentations.", tools=HostedCodeInterpreterTool(), - max_tokens=20000, - additional_chat_options={ + default_options={ + "max_tokens": 20000, "thinking": {"type": "enabled", "budget_tokens": 10000}, "container": {"skills": [{"type": "anthropic", "skill_id": "pptx", "version": "latest"}]}, }, diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_image_generation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_image_generation.py index 8274c43ab0..63e6155b0d 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_image_generation.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_image_generation.py @@ -44,7 +44,7 @@ async def main() -> None: result = await agent.run( query, # These additional options are required for image generation - additional_chat_options={ + options={ "extra_headers": {"x-ms-oai-image-generation-deployment": "gpt-image-1-mini"}, }, ) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_runtime_json_schema.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_runtime_json_schema.py index 17bf359afe..b2865da64b 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_runtime_json_schema.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_runtime_json_schema.py @@ -46,7 +46,7 @@ async def main() -> None: result = await agent.run( query, # Specify type to use as response - additional_chat_options={ + options={ "response_format": { "type": "json_schema", "json_schema": { diff --git a/python/samples/getting_started/agents/custom/custom_chat_client.py b/python/samples/getting_started/agents/custom/custom_chat_client.py index 67bfc0f9cb..f604571470 100644 --- a/python/samples/getting_started/agents/custom/custom_chat_client.py +++ b/python/samples/getting_started/agents/custom/custom_chat_client.py @@ -2,13 +2,13 @@ import asyncio import random +import sys from collections.abc import AsyncIterable, MutableSequence -from typing import Any, ClassVar +from typing import Any, ClassVar, Generic from agent_framework import ( BaseChatClient, ChatMessage, - ChatOptions, ChatResponse, ChatResponseUpdate, Role, @@ -16,6 +16,12 @@ use_chat_middleware, use_function_invocation, ) +from agent_framework._clients import TOptions_co + +if sys.version_info >= (3, 12): + from typing import override # type: 
ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover """ Custom Chat Client Implementation Example @@ -27,7 +33,7 @@ @use_function_invocation @use_chat_middleware -class EchoingChatClient(BaseChatClient): +class EchoingChatClient(BaseChatClient[TOptions_co], Generic[TOptions_co]): """A custom chat client that echoes messages back with modifications. This demonstrates how to implement a custom chat client by extending BaseChatClient @@ -46,11 +52,12 @@ def __init__(self, *, prefix: str = "Echo:", **kwargs: Any) -> None: super().__init__(**kwargs) self.prefix = prefix + @override async def _inner_get_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: """Echo back the user's message with a prefix.""" @@ -77,16 +84,17 @@ async def _inner_get_response( response_id=f"echo-resp-{random.randint(1000, 9999)}", ) + @override async def _inner_get_streaming_response( self, *, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions, + options: dict[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: """Stream back the echoed message character by character.""" # Get the complete response first - response = await self._inner_get_response(messages=messages, chat_options=chat_options, **kwargs) + response = await self._inner_get_response(messages=messages, options=options, **kwargs) if response.messages: response_text = response.messages[0].text or "" diff --git a/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py b/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py index 5821c2bcf0..21deddd857 100644 --- a/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py +++ b/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py @@ -24,7 +24,7 @@ async def reasoning_example() -> None: agent = OllamaChatClient().create_agent( name="TimeAgent", instructions="You are a helpful agent answer in one sentence.", - additional_chat_options={"think": True}, # Enable Reasoning on agent level + default_options={"think": True}, # Enable Reasoning on agent level ) query = "Hey what is 3+4? Can you explain how you got to that answer?" print(f"User: {query}") diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py index f461c2864b..3489dc0489 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py @@ -3,7 +3,7 @@ import asyncio import json -from agent_framework.openai import OpenAIChatClient +from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions """ OpenAI Chat Client Runtime JSON Schema Example @@ -32,7 +32,7 @@ async def non_streaming_example() -> None: print("=== Non-streaming runtime JSON schema example ===") - agent = OpenAIChatClient().create_agent( + agent = OpenAIChatClient[OpenAIChatOptions]().create_agent( name="RuntimeSchemaAgent", instructions="Return only JSON that matches the provided schema. 
Do not add commentary.", ) @@ -42,7 +42,7 @@ async def non_streaming_example() -> None: response = await agent.run( query, - additional_chat_options={ + options={ "response_format": { "type": "json_schema", "json_schema": { @@ -76,7 +76,7 @@ async def streaming_example() -> None: chunks: list[str] = [] async for chunk in agent.run_stream( query, - additional_chat_options={ + options={ "response_format": { "type": "json_schema", "json_schema": { diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py b/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py index b07a7fb314..1b06e9db04 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_reasoning.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework.openai import OpenAIResponsesClient +from agent_framework.openai import OpenAIResponsesClient, OpenAIResponsesOptions """ OpenAI Responses Client Reasoning Example @@ -10,19 +10,20 @@ This sample demonstrates advanced reasoning capabilities using OpenAI's gpt-5 models, showing step-by-step reasoning process visualization and complex problem-solving. -This uses the additional_chat_options parameter to enable reasoning with high effort and detailed summaries. -You can also set these options at the run level, since they are api and/or provider specific, you will need to lookup -the correct values for your provider, since these are passed through as-is. +This uses the default_options parameter to enable reasoning with high effort and detailed summaries. +You can also set these options at the run level using the options parameter. +Since these are api and/or provider specific, you will need to lookup +the correct values for your provider, as they are passed through as-is. In this case they are here: https://platform.openai.com/docs/api-reference/responses/create#responses-create-reasoning """ -agent = OpenAIResponsesClient(model_id="gpt-5").create_agent( +agent = OpenAIResponsesClient[OpenAIResponsesOptions](model_id="gpt-5").create_agent( name="MathHelper", instructions="You are a personal math tutor. 
When asked a math question, " "reason over how best to approach the problem and share your thought process.", - additional_chat_options={"reasoning": {"effort": "high", "summary": "detailed"}}, + default_options={"reasoning": {"effort": "high", "summary": "detailed"}}, ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py index c32a6a5880..14aff76760 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_runtime_json_schema.py @@ -42,7 +42,7 @@ async def non_streaming_example() -> None: response = await agent.run( query, - additional_chat_options={ + options={ "response_format": { "type": "json_schema", "json_schema": { @@ -76,7 +76,7 @@ async def streaming_example() -> None: chunks: list[str] = [] async for chunk in agent.run_stream( query, - additional_chat_options={ + options={ "response_format": { "type": "json_schema", "json_schema": { diff --git a/python/samples/getting_started/chat_client/typed_options.py b/python/samples/getting_started/chat_client/typed_options.py new file mode 100644 index 0000000000..533b214ebe --- /dev/null +++ b/python/samples/getting_started/chat_client/typed_options.py @@ -0,0 +1,182 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +from typing import Literal + +from agent_framework import ChatAgent +from agent_framework.anthropic import AnthropicClient +from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions + +"""TypedDict-based Chat Options. + +In Agent Framework, we have made ChatClient and ChatAgent generic over a ChatOptions typeddict, this means that +you can override which options are available for a given client or agent by providing your own TypedDict subclass. +And we include the most common options for all ChatClient providers out of the box. + +This sample demonstrates the TypedDict-based approach for chat client and agent options, +which provides: +1. IDE autocomplete for available options +2. Type checking to catch errors at development time +3. An example of defining provider-specific options by extending the base options, + including overriding unsupported options. + +The sample shows usage with both OpenAI and Anthropic clients, demonstrating +how provider-specific options work for ChatClient and ChatAgent. But the same approach works for other providers too. 
+""" + + +async def demo_anthropic_chat_client() -> None: + """Demonstrate Anthropic ChatClient with typed options and validation.""" + print("\n=== Anthropic ChatClient with TypedDict Options ===\n") + + # Create Anthropic client + client = AnthropicClient(model_id="claude-sonnet-4-5-20250929") + + # Standard options work great: + response = await client.get_response( + "What is the capital of France?", + options={ + "temperature": 0.5, + "max_tokens": 1000, + # Anthropic-specific options: + "thinking": {"type": "enabled", "budget_tokens": 1000}, + # "top_k": 40, # <-- Uncomment for Anthropic-specific option + }, + ) + + print(f"Anthropic Response: {response.text}") + print(f"Model used: {response.model_id}") + + +async def demo_anthropic_agent() -> None: + """Demonstrate ChatAgent with Anthropic client and typed options.""" + print("\n=== ChatAgent with Anthropic and Typed Options ===\n") + + client = AnthropicClient(model_id="claude-sonnet-4-5-20250929") + + # Create a typed agent for Anthropic - IDE knows Anthropic-specific options! + agent = ChatAgent( + chat_client=client, + name="claude-assistant", + instructions="You are a helpful assistant powered by Claude. Be concise.", + default_options={ + "temperature": 0.5, + "max_tokens": 200, + "top_k": 40, # Anthropic-specific option, uncomment to try + }, + ) + + # Run the agent + response = await agent.run("Explain quantum computing in one sentence.") + + print(f"Agent Response: {response.text}") + + +class OpenAIReasoningChatOptions(OpenAIChatOptions, total=False): + """Chat options for OpenAI reasoning models (o1, o3, o4-mini, etc.). + + Reasoning models have different parameter support compared to standard models. + This TypedDict marks unsupported parameters with ``None`` type. + + Examples: + .. code-block:: python + + from agent_framework.openai import OpenAIReasoningChatOptions + + options: OpenAIReasoningChatOptions = { + "model_id": "o3", + "reasoning_effort": "high", + "max_tokens": 4096, + } + """ + + # Reasoning-specific parameters + reasoning_effort: Literal["none", "minimal", "low", "medium", "high", "xhigh"] + + # Unsupported parameters for reasoning models (override with None) + temperature: None + top_p: None + frequency_penalty: None + presence_penalty: None + logit_bias: None + logprobs: None + top_logprobs: None + stop: None # Not supported for o3 and o4-mini + + +async def demo_openai_chat_client_reasoning_models() -> None: + """Demonstrate OpenAI ChatClient with typed options for reasoning models.""" + print("\n=== OpenAI ChatClient with TypedDict Options ===\n") + + # Create OpenAI client + client = OpenAIChatClient[OpenAIReasoningChatOptions]() + + # With specific options, you get full IDE autocomplete! + # Try typing `client.get_response("Hello", options={` and see the suggestions + response = await client.get_response( + "What is 2 + 2?", + options={ + "model_id": "o3", + "max_tokens": 100, + "allow_multiple_tool_calls": True, + # OpenAI-specific options work: + "reasoning_effort": "medium", + # Unsupported options are caught by type checker (uncomment to see): + # "temperature": 0.7, + # "random": 234, + }, + ) + + print(f"OpenAI Response: {response.text}") + print(f"Model used: {response.model_id}") + + +async def demo_openai_agent() -> None: + """Demonstrate ChatAgent with OpenAI client and typed options.""" + print("\n=== ChatAgent with OpenAI and Typed Options ===\n") + + # Create a typed agent - IDE will autocomplete options! 
+ # The type annotation can be done either on the agent like below, + # or on the client when constructing the client instance: + # client = OpenAIChatClient[OpenAIReasoningChatOptions]() + agent = ChatAgent[OpenAIReasoningChatOptions]( + chat_client=OpenAIChatClient(), + name="reasoning-assistant", + instructions="You are a helpful assistant. Answer concisely.", + # Options can be set at construction time + default_options={ + "model_id": "o3", + "max_tokens": 100, + "allow_multiple_tool_calls": True, + # OpenAI-specific options work: + "reasoning_effort": "medium", + # Unsupported options are caught by type checker (uncomment to see): + # "temperature": 0.7, + # "random": 234, + }, + ) + + # Or pass options at runtime - they override construction options + response = await agent.run( + "What is 25 * 47?", + options={ + "reasoning_effort": "high", # Override for a run + }, + ) + + print(f"Agent Response: {response.text}") + + +async def main() -> None: + """Run all Typed Options demonstrations.""" + # Anthropic demos (requires ANTHROPIC_API_KEY) + await demo_anthropic_chat_client() + await demo_anthropic_agent() + + # OpenAI demos (requires OPENAI_API_KEY) + await demo_openai_chat_client_reasoning_models() + await demo_openai_agent() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/context_providers/simple_context_provider.py b/python/samples/getting_started/context_providers/simple_context_provider.py index 4dfa4ee29d..9e0eaefabd 100644 --- a/python/samples/getting_started/context_providers/simple_context_provider.py +++ b/python/samples/getting_started/context_providers/simple_context_provider.py @@ -4,7 +4,7 @@ from collections.abc import MutableSequence, Sequence from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, ChatOptions, Context, ContextProvider +from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, Context, ContextProvider from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential from pydantic import BaseModel @@ -46,11 +46,9 @@ async def invoked( # Use the chat client to extract structured information result = await self._chat_client.get_response( messages=request_messages, # type: ignore - chat_options=ChatOptions( - instructions="Extract the user's name and age from the message if present. " - "If not present return nulls.", - response_format=UserInfo, - ), + instructions="Extract the user's name and age from the message if present. " + "If not present return nulls.", + response_format=UserInfo, ) # Update user info with extracted data diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index b0fbb7eea1..aab84478ce 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -17,7 +17,6 @@ FunctionResultContent, RequestInfoEvent, Role, - ToolMode, WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, @@ -177,7 +176,7 @@ def create_writer_agent() -> ChatAgent: "produce a 3-sentence draft." 
), tools=[fetch_product_brief, get_brand_voice_profile], - tool_choice=ToolMode.REQUIRED_ANY, + tool_choice="required", ) diff --git a/python/uv.lock b/python/uv.lock index 26fb2ed57e..44ecd21be0 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -489,7 +489,7 @@ math = [ tau2 = [ { name = "loguru", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -608,7 +608,7 @@ source = { editable = "packages/redis" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "redis", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "redisvl", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -1476,7 +1476,7 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } wheels = [ @@ -1959,11 +1959,11 @@ wheels = [ [[package]] name = "filelock" -version = "3.20.2" +version = "3.20.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c1/e0/a75dbe4bca1e7d41307323dad5ea2efdd95408f74ab2de8bd7dba9b51a1a/filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", size = 19510, upload-time = "2026-01-02T15:33:32.582Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/30/ab407e2ec752aa541704ed8f93c11e2a5d92c168b8a755d818b74a3c5c2d/filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8", size = 16697, upload-time = "2026-01-02T15:33:31.133Z" }, + { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" }, ] [[package]] @@ -2215,11 +2215,11 @@ wheels = [ [[package]] name = "fsspec" -version = "2025.12.0" +version = "2026.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b6/27/954057b0d1f53f086f681755207dda6de6c660ce133c829158e8e8fe7895/fsspec-2025.12.0.tar.gz", hash = "sha256:c505de011584597b1060ff778bb664c1bc022e87921b0e4f10cc9c44f9635973", size = 309748, upload-time = "2025-12-03T15:23:42.687Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d5/7d/5df2650c57d47c57232af5ef4b4fdbff182070421e405e0d62c6cdbfaa87/fsspec-2026.1.0.tar.gz", hash = "sha256:e987cb0496a0d81bba3a9d1cee62922fb395e7d4c3b575e57f547953334fe07b", size = 310496, upload-time = "2026-01-09T15:21:35.562Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/51/c7/b64cae5dba3a1b138d7123ec36bb5ccd39d39939f18454407e5468f4763f/fsspec-2025.12.0-py3-none-any.whl", hash = "sha256:8bf1fe301b7d8acfa6e8571e3b1c3d158f909666642431cc78a1b7b4dbc5ec5b", size = 201422, upload-time = "2025-12-03T15:23:41.434Z" }, + { url = "https://files.pythonhosted.org/packages/01/c9/97cc5aae1648dcb851958a3ddf73ccd7dbe5650d95203ecb4d7720b4cdbf/fsspec-2026.1.0-py3-none-any.whl", hash = "sha256:cb76aa913c2285a3b49bdd5fc55b1d7c708d7208126b60f2eb8194fe1b4cbdcc", size = 201838, upload-time = "2026-01-09T15:21:34.041Z" }, ] [[package]] @@ -3068,7 +3068,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.80.13" +version = "1.80.15" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3086,9 +3086,9 @@ dependencies = [ { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tokenizers", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/e1/b4/ef75838159aabee15ad93d65ee0e91d04ba0e310784b7b0d3f490cca270c/litellm-1.80.13.tar.gz", hash = "sha256:61ed22dfad633ada3b97dd8a50d8e8d804da0115105006d2f9d77ba3fb247a0b", size = 13277620, upload-time = "2026-01-09T04:37:08.529Z" } +sdist = { url = "https://files.pythonhosted.org/packages/12/41/9b28df3e4739df83ddb32dfb2bccb12ad271d986494c9fd60e4927a0a6c3/litellm-1.80.15.tar.gz", hash = "sha256:759d09f33c9c6028c58dcdf71781b17b833ee926525714e09a408602be27f54e", size = 13376508, upload-time = "2026-01-11T18:31:44.95Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a6/90/e8e0ad7f57d3a56c3411b3867e02768f9722b5975a263c8aaaaba6693d91/litellm-1.80.13-py3-none-any.whl", hash = "sha256:43dcdbca010961f17d7a5a6a995a38d1a46101350959b0e8269576cfe913cf0b", size = 11562501, upload-time = "2026-01-09T04:37:05.551Z" }, + { url = "https://files.pythonhosted.org/packages/df/3b/b1bd693721ccb3c9a37c8233d019a643ac57bef5a93f279e5a63839ee4db/litellm-1.80.15-py3-none-any.whl", hash = "sha256:f354e49456985a235b9ed99df1c19d686d30501f96e68882dcc5b29b1e7c59d9", size = 11670707, upload-time = "2026-01-11T18:31:41.67Z" }, ] [package.optional-dependencies] @@ -3130,11 +3130,11 @@ wheels = [ [[package]] name = "litellm-proxy-extras" -version = "0.4.20" +version = "0.4.21" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/44/ab/df85ce715ebf488cacba338607f67d46c4e4db0b717c9d2f904b8dc7de12/litellm_proxy_extras-0.4.20.tar.gz", hash = "sha256:4fcc95db25cc8b75abbc3f00bb79fd6b94edd1b838ad7bb12cf839b39c67923a", size = 21044, upload-time = "2026-01-07T19:11:32.562Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/1b/18fd5dd6b89bc7f10ea9af49fdb7239dcb77cf59c80030016ac2bc7284d2/litellm_proxy_extras-0.4.21.tar.gz", hash = "sha256:fa0e012984aa8e5114f88f4bad53d6abb589e5ca3eab445f74f8ddeceb62d848", size = 21364, upload-time = "2026-01-10T20:00:27.403Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/f5/eb350c49e7cf09db5b335aaeef410c2094e19c84bfe51733cab8470dc011/litellm_proxy_extras-0.4.20-py3-none-any.whl", hash = "sha256:7737cd693dd1aa0bd25ada6d300b37f42c8c18d1820535aceb0ed38ed21f68f5", size = 46565, upload-time = "2026-01-07T19:11:29.728Z" }, + { url = "https://files.pythonhosted.org/packages/f2/26/920d1a89196fe0ffb55d054312dbf5c2110cbffabbc77c71df0f0455c270/litellm_proxy_extras-0.4.21-py3-none-any.whl", hash = "sha256:83a1734e9773610945230606012e602bbcbfba1c60fde836d51102c1a296f166", size = 47136, upload-time = "2026-01-10T20:00:25.849Z" }, ] [[package]] @@ -3258,7 +3258,7 @@ dependencies = [ { name = "fonttools", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "kiwisolver", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = 
"(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pillow", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyparsing", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3425,7 +3425,7 @@ version = "0.5.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0e/4a/c27b42ed9b1c7d13d9ba8b6905dece787d6259152f2309338aed29b2447b/ml_dtypes-0.5.4.tar.gz", hash = "sha256:8ab06a50fb9bf9666dd0fe5dfb4676fa2b0ac0f31ecff72a6c3af8e22c063453", size = 692314, upload-time = "2025-11-17T22:32:31.031Z" } wheels = [ @@ -3780,7 +3780,7 @@ wheels = [ [[package]] name = "numpy" -version = "2.4.0" +version = "2.4.1" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -3796,79 +3796,79 @@ resolution-markers = [ "python_full_version == '3.12.*' and sys_platform == 'win32'", "python_full_version == '3.11.*' and sys_platform == 'win32'", ] -sdist = { url = "https://files.pythonhosted.org/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/26/7e/7bae7cbcc2f8132271967aa03e03954fc1e48aa1f3bf32b29ca95fbef352/numpy-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:316b2f2584682318539f0bcaca5a496ce9ca78c88066579ebd11fd06f8e4741e", size = 16940166, upload-time = "2025-12-20T16:15:43.434Z" }, - { url = "https://files.pythonhosted.org/packages/0f/27/6c13f5b46776d6246ec884ac5817452672156a506d08a1f2abb39961930a/numpy-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2718c1de8504121714234b6f8241d0019450353276c88b9453c9c3d92e101db", size = 12641781, upload-time = "2025-12-20T16:15:45.701Z" }, - { url = "https://files.pythonhosted.org/packages/14/1c/83b4998d4860d15283241d9e5215f28b40ac31f497c04b12fa7f428ff370/numpy-2.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:21555da4ec4a0c942520ead42c3b0dc9477441e085c42b0fbdd6a084869a6f6b", size = 5470247, upload-time = "2025-12-20T16:15:47.943Z" }, - { url = 
"https://files.pythonhosted.org/packages/54/08/cbce72c835d937795571b0464b52069f869c9e78b0c076d416c5269d2718/numpy-2.4.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:413aa561266a4be2d06cd2b9665e89d9f54c543f418773076a76adcf2af08bc7", size = 6799807, upload-time = "2025-12-20T16:15:49.795Z" }, - { url = "https://files.pythonhosted.org/packages/ff/be/2e647961cd8c980591d75cdcd9e8f647d69fbe05e2a25613dc0a2ea5fb1a/numpy-2.4.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0feafc9e03128074689183031181fac0897ff169692d8492066e949041096548", size = 14701992, upload-time = "2025-12-20T16:15:51.615Z" }, - { url = "https://files.pythonhosted.org/packages/a2/fb/e1652fb8b6fd91ce6ed429143fe2e01ce714711e03e5b762615e7b36172c/numpy-2.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8fdfed3deaf1928fb7667d96e0567cdf58c2b370ea2ee7e586aa383ec2cb346", size = 16646871, upload-time = "2025-12-20T16:15:54.129Z" }, - { url = "https://files.pythonhosted.org/packages/62/23/d841207e63c4322842f7cd042ae981cffe715c73376dcad8235fb31debf1/numpy-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e06a922a469cae9a57100864caf4f8a97a1026513793969f8ba5b63137a35d25", size = 16487190, upload-time = "2025-12-20T16:15:56.147Z" }, - { url = "https://files.pythonhosted.org/packages/bc/a0/6a842c8421ebfdec0a230e65f61e0dabda6edbef443d999d79b87c273965/numpy-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:927ccf5cd17c48f801f4ed43a7e5673a2724bd2171460be3e3894e6e332ef83a", size = 18580762, upload-time = "2025-12-20T16:15:58.524Z" }, - { url = "https://files.pythonhosted.org/packages/0a/d1/c79e0046641186f2134dde05e6181825b911f8bdcef31b19ddd16e232847/numpy-2.4.0-cp311-cp311-win32.whl", hash = "sha256:882567b7ae57c1b1a0250208cc21a7976d8cbcc49d5a322e607e6f09c9e0bd53", size = 6233359, upload-time = "2025-12-20T16:16:00.938Z" }, - { url = "https://files.pythonhosted.org/packages/fc/f0/74965001d231f28184d6305b8cdc1b6fcd4bf23033f6cb039cfe76c9fca7/numpy-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b986403023c8f3bf8f487c2e6186afda156174d31c175f747d8934dfddf3479", size = 12601132, upload-time = "2025-12-20T16:16:02.484Z" }, - { url = "https://files.pythonhosted.org/packages/65/32/55408d0f46dfebce38017f5bd931affa7256ad6beac1a92a012e1fbc67a7/numpy-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:3f3096405acc48887458bbf9f6814d43785ac7ba2a57ea6442b581dedbc60ce6", size = 10573977, upload-time = "2025-12-20T16:16:04.77Z" }, - { url = "https://files.pythonhosted.org/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" }, - { url = "https://files.pythonhosted.org/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" }, - { url = "https://files.pythonhosted.org/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" }, - { url = 
"https://files.pythonhosted.org/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" }, - { url = "https://files.pythonhosted.org/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" }, - { url = "https://files.pythonhosted.org/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" }, - { url = "https://files.pythonhosted.org/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" }, - { url = "https://files.pythonhosted.org/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" }, - { url = "https://files.pythonhosted.org/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" }, - { url = "https://files.pythonhosted.org/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" }, - { url = "https://files.pythonhosted.org/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" }, - { url = "https://files.pythonhosted.org/packages/a7/0d/853fd96372eda07c824d24adf02e8bc92bb3731b43a9b2a39161c3667cc4/numpy-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a152d86a3ae00ba5f47b3acf3b827509fd0b6cb7d3259665e63dafbad22a75ea", size = 16649088, upload-time = "2025-12-20T16:16:31.421Z" }, - { url = "https://files.pythonhosted.org/packages/e3/37/cc636f1f2a9f585434e20a3e6e63422f70bfe4f7f6698e941db52ea1ac9a/numpy-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:39b19251dec4de8ff8496cd0806cbe27bf0684f765abb1f4809554de93785f2d", size = 12364065, upload-time = "2025-12-20T16:16:33.491Z" }, - { url = "https://files.pythonhosted.org/packages/ed/69/0b78f37ca3690969beee54103ce5f6021709134e8020767e93ba691a72f1/numpy-2.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:009bd0ea12d3c784b6639a8457537016ce5172109e585338e11334f6a7bb88ee", size = 5192640, upload-time = "2025-12-20T16:16:35.636Z" }, - { url = 
"https://files.pythonhosted.org/packages/1d/2a/08569f8252abf590294dbb09a430543ec8f8cc710383abfb3e75cc73aeda/numpy-2.4.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5fe44e277225fd3dff6882d86d3d447205d43532c3627313d17e754fb3905a0e", size = 6541556, upload-time = "2025-12-20T16:16:37.276Z" }, - { url = "https://files.pythonhosted.org/packages/93/e9/a949885a4e177493d61519377952186b6cbfdf1d6002764c664ba28349b5/numpy-2.4.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f935c4493eda9069851058fa0d9e39dbf6286be690066509305e52912714dbb2", size = 14396562, upload-time = "2025-12-20T16:16:38.953Z" }, - { url = "https://files.pythonhosted.org/packages/99/98/9d4ad53b0e9ef901c2ef1d550d2136f5ac42d3fd2988390a6def32e23e48/numpy-2.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cfa5f29a695cb7438965e6c3e8d06e0416060cf0d709c1b1c1653a939bf5c2a", size = 16351719, upload-time = "2025-12-20T16:16:41.503Z" }, - { url = "https://files.pythonhosted.org/packages/28/de/5f3711a38341d6e8dd619f6353251a0cdd07f3d6d101a8fd46f4ef87f895/numpy-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba0cb30acd3ef11c94dc27fbfba68940652492bc107075e7ffe23057f9425681", size = 16176053, upload-time = "2025-12-20T16:16:44.552Z" }, - { url = "https://files.pythonhosted.org/packages/2a/5b/2a3753dc43916501b4183532e7ace862e13211042bceafa253afb5c71272/numpy-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60e8c196cd82cbbd4f130b5290007e13e6de3eca79f0d4d38014769d96a7c475", size = 18277859, upload-time = "2025-12-20T16:16:47.174Z" }, - { url = "https://files.pythonhosted.org/packages/2c/c5/a18bcdd07a941db3076ef489d036ab16d2bfc2eae0cf27e5a26e29189434/numpy-2.4.0-cp313-cp313-win32.whl", hash = "sha256:5f48cb3e88fbc294dc90e215d86fbaf1c852c63dbdb6c3a3e63f45c4b57f7344", size = 5953849, upload-time = "2025-12-20T16:16:49.554Z" }, - { url = "https://files.pythonhosted.org/packages/4f/f1/719010ff8061da6e8a26e1980cf090412d4f5f8060b31f0c45d77dd67a01/numpy-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:a899699294f28f7be8992853c0c60741f16ff199205e2e6cdca155762cbaa59d", size = 12302840, upload-time = "2025-12-20T16:16:51.227Z" }, - { url = "https://files.pythonhosted.org/packages/f5/5a/b3d259083ed8b4d335270c76966cb6cf14a5d1b69e1a608994ac57a659e6/numpy-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:9198f447e1dc5647d07c9a6bbe2063cc0132728cc7175b39dbc796da5b54920d", size = 10308509, upload-time = "2025-12-20T16:16:53.313Z" }, - { url = "https://files.pythonhosted.org/packages/31/01/95edcffd1bb6c0633df4e808130545c4f07383ab629ac7e316fb44fff677/numpy-2.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74623f2ab5cc3f7c886add4f735d1031a1d2be4a4ae63c0546cfd74e7a31ddf6", size = 12491815, upload-time = "2025-12-20T16:16:55.496Z" }, - { url = "https://files.pythonhosted.org/packages/59/ea/5644b8baa92cc1c7163b4b4458c8679852733fa74ca49c942cfa82ded4e0/numpy-2.4.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0804a8e4ab070d1d35496e65ffd3cf8114c136a2b81f61dfab0de4b218aacfd5", size = 5320321, upload-time = "2025-12-20T16:16:57.468Z" }, - { url = "https://files.pythonhosted.org/packages/26/4e/e10938106d70bc21319bd6a86ae726da37edc802ce35a3a71ecdf1fdfe7f/numpy-2.4.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:02a2038eb27f9443a8b266a66911e926566b5a6ffd1a689b588f7f35b81e7dc3", size = 6641635, upload-time = "2025-12-20T16:16:59.379Z" }, - { url = 
"https://files.pythonhosted.org/packages/b3/8d/a8828e3eaf5c0b4ab116924df82f24ce3416fa38d0674d8f708ddc6c8aac/numpy-2.4.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1889b3a3f47a7b5bee16bc25a2145bd7cb91897f815ce3499db64c7458b6d91d", size = 14456053, upload-time = "2025-12-20T16:17:01.768Z" }, - { url = "https://files.pythonhosted.org/packages/68/a1/17d97609d87d4520aa5ae2dcfb32305654550ac6a35effb946d303e594ce/numpy-2.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85eef4cb5625c47ee6425c58a3502555e10f45ee973da878ac8248ad58c136f3", size = 16401702, upload-time = "2025-12-20T16:17:04.235Z" }, - { url = "https://files.pythonhosted.org/packages/18/32/0f13c1b2d22bea1118356b8b963195446f3af124ed7a5adfa8fdecb1b6ca/numpy-2.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6dc8b7e2f4eb184b37655195f421836cfae6f58197b67e3ffc501f1333d993fa", size = 16242493, upload-time = "2025-12-20T16:17:06.856Z" }, - { url = "https://files.pythonhosted.org/packages/ae/23/48f21e3d309fbc137c068a1475358cbd3a901b3987dcfc97a029ab3068e2/numpy-2.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:44aba2f0cafd287871a495fb3163408b0bd25bbce135c6f621534a07f4f7875c", size = 18324222, upload-time = "2025-12-20T16:17:09.392Z" }, - { url = "https://files.pythonhosted.org/packages/ac/52/41f3d71296a3dcaa4f456aaa3c6fc8e745b43d0552b6bde56571bb4b4a0f/numpy-2.4.0-cp313-cp313t-win32.whl", hash = "sha256:20c115517513831860c573996e395707aa9fb691eb179200125c250e895fcd93", size = 6076216, upload-time = "2025-12-20T16:17:11.437Z" }, - { url = "https://files.pythonhosted.org/packages/35/ff/46fbfe60ab0710d2a2b16995f708750307d30eccbb4c38371ea9e986866e/numpy-2.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b48e35f4ab6f6a7597c46e301126ceba4c44cd3280e3750f85db48b082624fa4", size = 12444263, upload-time = "2025-12-20T16:17:13.182Z" }, - { url = "https://files.pythonhosted.org/packages/a3/e3/9189ab319c01d2ed556c932ccf55064c5d75bb5850d1df7a482ce0badead/numpy-2.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:4d1cfce39e511069b11e67cd0bd78ceff31443b7c9e5c04db73c7a19f572967c", size = 10378265, upload-time = "2025-12-20T16:17:15.211Z" }, - { url = "https://files.pythonhosted.org/packages/ab/ed/52eac27de39d5e5a6c9aadabe672bc06f55e24a3d9010cd1183948055d76/numpy-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c95eb6db2884917d86cde0b4d4cf31adf485c8ec36bf8696dd66fa70de96f36b", size = 16647476, upload-time = "2025-12-20T16:17:17.671Z" }, - { url = "https://files.pythonhosted.org/packages/77/c0/990ce1b7fcd4e09aeaa574e2a0a839589e4b08b2ca68070f1acb1fea6736/numpy-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:65167da969cd1ec3a1df31cb221ca3a19a8aaa25370ecb17d428415e93c1935e", size = 12374563, upload-time = "2025-12-20T16:17:20.216Z" }, - { url = "https://files.pythonhosted.org/packages/37/7c/8c5e389c6ae8f5fd2277a988600d79e9625db3fff011a2d87ac80b881a4c/numpy-2.4.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:3de19cfecd1465d0dcf8a5b5ea8b3155b42ed0b639dba4b71e323d74f2a3be5e", size = 5203107, upload-time = "2025-12-20T16:17:22.47Z" }, - { url = "https://files.pythonhosted.org/packages/e6/94/ca5b3bd6a8a70a5eec9a0b8dd7f980c1eff4b8a54970a9a7fef248ef564f/numpy-2.4.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:6c05483c3136ac4c91b4e81903cb53a8707d316f488124d0398499a4f8e8ef51", size = 6538067, upload-time = "2025-12-20T16:17:24.001Z" }, - { url = 
"https://files.pythonhosted.org/packages/79/43/993eb7bb5be6761dde2b3a3a594d689cec83398e3f58f4758010f3b85727/numpy-2.4.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36667db4d6c1cea79c8930ab72fadfb4060feb4bfe724141cd4bd064d2e5f8ce", size = 14411926, upload-time = "2025-12-20T16:17:25.822Z" }, - { url = "https://files.pythonhosted.org/packages/03/75/d4c43b61de473912496317a854dac54f1efec3eeb158438da6884b70bb90/numpy-2.4.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9a818668b674047fd88c4cddada7ab8f1c298812783e8328e956b78dc4807f9f", size = 16354295, upload-time = "2025-12-20T16:17:28.308Z" }, - { url = "https://files.pythonhosted.org/packages/b8/0a/b54615b47ee8736a6461a4bb6749128dd3435c5a759d5663f11f0e9af4ac/numpy-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1ee32359fb7543b7b7bd0b2f46294db27e29e7bbdf70541e81b190836cd83ded", size = 16190242, upload-time = "2025-12-20T16:17:30.993Z" }, - { url = "https://files.pythonhosted.org/packages/98/ce/ea207769aacad6246525ec6c6bbd66a2bf56c72443dc10e2f90feed29290/numpy-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e493962256a38f58283de033d8af176c5c91c084ea30f15834f7545451c42059", size = 18280875, upload-time = "2025-12-20T16:17:33.327Z" }, - { url = "https://files.pythonhosted.org/packages/17/ef/ec409437aa962ea372ed601c519a2b141701683ff028f894b7466f0ab42b/numpy-2.4.0-cp314-cp314-win32.whl", hash = "sha256:6bbaebf0d11567fa8926215ae731e1d58e6ec28a8a25235b8a47405d301332db", size = 6002530, upload-time = "2025-12-20T16:17:35.729Z" }, - { url = "https://files.pythonhosted.org/packages/5f/4a/5cb94c787a3ed1ac65e1271b968686521169a7b3ec0b6544bb3ca32960b0/numpy-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d857f55e7fdf7c38ab96c4558c95b97d1c685be6b05c249f5fdafcbd6f9899e", size = 12435890, upload-time = "2025-12-20T16:17:37.599Z" }, - { url = "https://files.pythonhosted.org/packages/48/a0/04b89db963af9de1104975e2544f30de89adbf75b9e75f7dd2599be12c79/numpy-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:bb50ce5fb202a26fd5404620e7ef820ad1ab3558b444cb0b55beb7ef66cd2d63", size = 10591892, upload-time = "2025-12-20T16:17:39.649Z" }, - { url = "https://files.pythonhosted.org/packages/53/e5/d74b5ccf6712c06c7a545025a6a71bfa03bdc7e0568b405b0d655232fd92/numpy-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:355354388cba60f2132df297e2d53053d4063f79077b67b481d21276d61fc4df", size = 12494312, upload-time = "2025-12-20T16:17:41.714Z" }, - { url = "https://files.pythonhosted.org/packages/c2/08/3ca9cc2ddf54dfee7ae9a6479c071092a228c68aef08252aa08dac2af002/numpy-2.4.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:1d8f9fde5f6dc1b6fc34df8162f3b3079365468703fee7f31d4e0cc8c63baed9", size = 5322862, upload-time = "2025-12-20T16:17:44.145Z" }, - { url = "https://files.pythonhosted.org/packages/87/74/0bb63a68394c0c1e52670cfff2e309afa41edbe11b3327d9af29e4383f34/numpy-2.4.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e0434aa22c821f44eeb4c650b81c7fbdd8c0122c6c4b5a576a76d5a35625ecd9", size = 6644986, upload-time = "2025-12-20T16:17:46.203Z" }, - { url = "https://files.pythonhosted.org/packages/06/8f/9264d9bdbcf8236af2823623fe2f3981d740fc3461e2787e231d97c38c28/numpy-2.4.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:40483b2f2d3ba7aad426443767ff5632ec3156ef09742b96913787d13c336471", size = 14457958, upload-time = "2025-12-20T16:17:48.017Z" }, - { url = 
"https://files.pythonhosted.org/packages/8c/d9/f9a69ae564bbc7236a35aa883319364ef5fd41f72aa320cc1cbe66148fe2/numpy-2.4.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6a7664ddd9746e20b7325351fe1a8408d0a2bf9c63b5e898290ddc8f09544", size = 16398394, upload-time = "2025-12-20T16:17:50.409Z" }, - { url = "https://files.pythonhosted.org/packages/34/c7/39241501408dde7f885d241a98caba5421061a2c6d2b2197ac5e3aa842d8/numpy-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ecb0019d44f4cdb50b676c5d0cb4b1eae8e15d1ed3d3e6639f986fc92b2ec52c", size = 16241044, upload-time = "2025-12-20T16:17:52.661Z" }, - { url = "https://files.pythonhosted.org/packages/7c/95/cae7effd90e065a95e59fe710eeee05d7328ed169776dfdd9f789e032125/numpy-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d0ffd9e2e4441c96a9c91ec1783285d80bf835b677853fc2770a89d50c1e48ac", size = 18321772, upload-time = "2025-12-20T16:17:54.947Z" }, - { url = "https://files.pythonhosted.org/packages/96/df/3c6c279accd2bfb968a76298e5b276310bd55d243df4fa8ac5816d79347d/numpy-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:77f0d13fa87036d7553bf81f0e1fe3ce68d14c9976c9851744e4d3e91127e95f", size = 6148320, upload-time = "2025-12-20T16:17:57.249Z" }, - { url = "https://files.pythonhosted.org/packages/92/8d/f23033cce252e7a75cae853d17f582e86534c46404dea1c8ee094a9d6d84/numpy-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b1f5b45829ac1848893f0ddf5cb326110604d6df96cdc255b0bf9edd154104d4", size = 12623460, upload-time = "2025-12-20T16:17:58.963Z" }, - { url = "https://files.pythonhosted.org/packages/a4/4f/1f8475907d1a7c4ef9020edf7f39ea2422ec896849245f00688e4b268a71/numpy-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:23a3e9d1a6f360267e8fbb38ba5db355a6a7e9be71d7fce7ab3125e88bb646c8", size = 10661799, upload-time = "2025-12-20T16:18:01.078Z" }, - { url = "https://files.pythonhosted.org/packages/4b/ef/088e7c7342f300aaf3ee5f2c821c4b9996a1bef2aaf6a49cc8ab4883758e/numpy-2.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b54c83f1c0c0f1d748dca0af516062b8829d53d1f0c402be24b4257a9c48ada6", size = 16819003, upload-time = "2025-12-20T16:18:03.41Z" }, - { url = "https://files.pythonhosted.org/packages/ff/ce/a53017b5443b4b84517182d463fc7bcc2adb4faa8b20813f8e5f5aeb5faa/numpy-2.4.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:aabb081ca0ec5d39591fc33018cd4b3f96e1a2dd6756282029986d00a785fba4", size = 12567105, upload-time = "2025-12-20T16:18:05.594Z" }, - { url = "https://files.pythonhosted.org/packages/77/58/5ff91b161f2ec650c88a626c3905d938c89aaadabd0431e6d9c1330c83e2/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:8eafe7c36c8430b7794edeab3087dec7bf31d634d92f2af9949434b9d1964cba", size = 5395590, upload-time = "2025-12-20T16:18:08.031Z" }, - { url = "https://files.pythonhosted.org/packages/1d/4e/f1a084106df8c2df8132fc437e56987308e0524836aa7733721c8429d4fe/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2f585f52b2baf07ff3356158d9268ea095e221371f1074fadea2f42544d58b4d", size = 6709947, upload-time = "2025-12-20T16:18:09.836Z" }, - { url = "https://files.pythonhosted.org/packages/63/09/3d8aeb809c0332c3f642da812ac2e3d74fc9252b3021f8c30c82e99e3f3d/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32ed06d0fe9cae27d8fb5f400c63ccee72370599c75e683a6358dd3a4fb50aaf", size = 14535119, upload-time = "2025-12-20T16:18:12.105Z" }, - { url = 
"https://files.pythonhosted.org/packages/fd/7f/68f0fc43a2cbdc6bb239160c754d87c922f60fbaa0fa3cd3d312b8a7f5ee/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:57c540ed8fb1f05cb997c6761cd56db72395b0d6985e90571ff660452ade4f98", size = 16475815, upload-time = "2025-12-20T16:18:14.433Z" }, - { url = "https://files.pythonhosted.org/packages/11/73/edeacba3167b1ca66d51b1a5a14697c2c40098b5ffa01811c67b1785a5ab/numpy-2.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a39fb973a726e63223287adc6dafe444ce75af952d711e400f3bf2b36ef55a7b", size = 12489376, upload-time = "2025-12-20T16:18:16.524Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/24/62/ae72ff66c0f1fd959925b4c11f8c2dea61f47f6acaea75a08512cdfe3fed/numpy-2.4.1.tar.gz", hash = "sha256:a1ceafc5042451a858231588a104093474c6a5c57dcc724841f5c888d237d690", size = 20721320, upload-time = "2026-01-10T06:44:59.619Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/34/2b1bc18424f3ad9af577f6ce23600319968a70575bd7db31ce66731bbef9/numpy-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0cce2a669e3c8ba02ee563c7835f92c153cf02edff1ae05e1823f1dde21b16a5", size = 16944563, upload-time = "2026-01-10T06:42:14.615Z" }, + { url = "https://files.pythonhosted.org/packages/2c/57/26e5f97d075aef3794045a6ca9eada6a4ed70eb9a40e7a4a93f9ac80d704/numpy-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:899d2c18024984814ac7e83f8f49d8e8180e2fbe1b2e252f2e7f1d06bea92425", size = 12645658, upload-time = "2026-01-10T06:42:17.298Z" }, + { url = "https://files.pythonhosted.org/packages/8e/ba/80fc0b1e3cb2fd5c6143f00f42eb67762aa043eaa05ca924ecc3222a7849/numpy-2.4.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:09aa8a87e45b55a1c2c205d42e2808849ece5c484b2aab11fecabec3841cafba", size = 5474132, upload-time = "2026-01-10T06:42:19.637Z" }, + { url = "https://files.pythonhosted.org/packages/40/ae/0a5b9a397f0e865ec171187c78d9b57e5588afc439a04ba9cab1ebb2c945/numpy-2.4.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:edee228f76ee2dab4579fad6f51f6a305de09d444280109e0f75df247ff21501", size = 6804159, upload-time = "2026-01-10T06:42:21.44Z" }, + { url = "https://files.pythonhosted.org/packages/86/9c/841c15e691c7085caa6fd162f063eff494099c8327aeccd509d1ab1e36ab/numpy-2.4.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a92f227dbcdc9e4c3e193add1a189a9909947d4f8504c576f4a732fd0b54240a", size = 14708058, upload-time = "2026-01-10T06:42:23.546Z" }, + { url = "https://files.pythonhosted.org/packages/5d/9d/7862db06743f489e6a502a3b93136d73aea27d97b2cf91504f70a27501d6/numpy-2.4.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:538bf4ec353709c765ff75ae616c34d3c3dca1a68312727e8f2676ea644f8509", size = 16651501, upload-time = "2026-01-10T06:42:25.909Z" }, + { url = "https://files.pythonhosted.org/packages/a6/9c/6fc34ebcbd4015c6e5f0c0ce38264010ce8a546cb6beacb457b84a75dfc8/numpy-2.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ac08c63cb7779b85e9d5318e6c3518b424bc1f364ac4cb2c6136f12e5ff2dccc", size = 16492627, upload-time = "2026-01-10T06:42:28.938Z" }, + { url = "https://files.pythonhosted.org/packages/aa/63/2494a8597502dacda439f61b3c0db4da59928150e62be0e99395c3ad23c5/numpy-2.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f9c360ecef085e5841c539a9a12b883dff005fbd7ce46722f5e9cef52634d82", size = 18585052, upload-time = "2026-01-10T06:42:31.312Z" }, + { url = 
"https://files.pythonhosted.org/packages/6a/93/098e1162ae7522fc9b618d6272b77404c4656c72432ecee3abc029aa3de0/numpy-2.4.1-cp311-cp311-win32.whl", hash = "sha256:0f118ce6b972080ba0758c6087c3617b5ba243d806268623dc34216d69099ba0", size = 6236575, upload-time = "2026-01-10T06:42:33.872Z" }, + { url = "https://files.pythonhosted.org/packages/8c/de/f5e79650d23d9e12f38a7bc6b03ea0835b9575494f8ec94c11c6e773b1b1/numpy-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:18e14c4d09d55eef39a6ab5b08406e84bc6869c1e34eef45564804f90b7e0574", size = 12604479, upload-time = "2026-01-10T06:42:35.778Z" }, + { url = "https://files.pythonhosted.org/packages/dd/65/e1097a7047cff12ce3369bd003811516b20ba1078dbdec135e1cd7c16c56/numpy-2.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:6461de5113088b399d655d45c3897fa188766415d0f568f175ab071c8873bd73", size = 10578325, upload-time = "2026-01-10T06:42:38.518Z" }, + { url = "https://files.pythonhosted.org/packages/78/7f/ec53e32bf10c813604edf07a3682616bd931d026fcde7b6d13195dfb684a/numpy-2.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d3703409aac693fa82c0aee023a1ae06a6e9d065dba10f5e8e80f642f1e9d0a2", size = 16656888, upload-time = "2026-01-10T06:42:40.913Z" }, + { url = "https://files.pythonhosted.org/packages/b8/e0/1f9585d7dae8f14864e948fd7fa86c6cb72dee2676ca2748e63b1c5acfe0/numpy-2.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7211b95ca365519d3596a1d8688a95874cc94219d417504d9ecb2df99fa7bfa8", size = 12373956, upload-time = "2026-01-10T06:42:43.091Z" }, + { url = "https://files.pythonhosted.org/packages/8e/43/9762e88909ff2326f5e7536fa8cb3c49fb03a7d92705f23e6e7f553d9cb3/numpy-2.4.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5adf01965456a664fc727ed69cc71848f28d063217c63e1a0e200a118d5eec9a", size = 5202567, upload-time = "2026-01-10T06:42:45.107Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ee/34b7930eb61e79feb4478800a4b95b46566969d837546aa7c034c742ef98/numpy-2.4.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:26f0bcd9c79a00e339565b303badc74d3ea2bd6d52191eeca5f95936cad107d0", size = 6549459, upload-time = "2026-01-10T06:42:48.152Z" }, + { url = "https://files.pythonhosted.org/packages/79/e3/5f115fae982565771be994867c89bcd8d7208dbfe9469185497d70de5ddf/numpy-2.4.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0093e85df2960d7e4049664b26afc58b03236e967fb942354deef3208857a04c", size = 14404859, upload-time = "2026-01-10T06:42:49.947Z" }, + { url = "https://files.pythonhosted.org/packages/d9/7d/9c8a781c88933725445a859cac5d01b5871588a15969ee6aeb618ba99eee/numpy-2.4.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ad270f438cbdd402c364980317fb6b117d9ec5e226fff5b4148dd9aa9fc6e02", size = 16371419, upload-time = "2026-01-10T06:42:52.409Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d2/8aa084818554543f17cf4162c42f162acbd3bb42688aefdba6628a859f77/numpy-2.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:297c72b1b98100c2e8f873d5d35fb551fce7040ade83d67dd51d38c8d42a2162", size = 16182131, upload-time = "2026-01-10T06:42:54.694Z" }, + { url = "https://files.pythonhosted.org/packages/60/db/0425216684297c58a8df35f3284ef56ec4a043e6d283f8a59c53562caf1b/numpy-2.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf6470d91d34bf669f61d515499859fa7a4c2f7c36434afb70e82df7217933f9", size = 18295342, upload-time = "2026-01-10T06:42:56.991Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/4c/14cb9d86240bd8c386c881bafbe43f001284b7cce3bc01623ac9475da163/numpy-2.4.1-cp312-cp312-win32.whl", hash = "sha256:b6bcf39112e956594b3331316d90c90c90fb961e39696bda97b89462f5f3943f", size = 5959015, upload-time = "2026-01-10T06:42:59.631Z" }, + { url = "https://files.pythonhosted.org/packages/51/cf/52a703dbeb0c65807540d29699fef5fda073434ff61846a564d5c296420f/numpy-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:e1a27bb1b2dee45a2a53f5ca6ff2d1a7f135287883a1689e930d44d1ff296c87", size = 12310730, upload-time = "2026-01-10T06:43:01.627Z" }, + { url = "https://files.pythonhosted.org/packages/69/80/a828b2d0ade5e74a9fe0f4e0a17c30fdc26232ad2bc8c9f8b3197cf7cf18/numpy-2.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:0e6e8f9d9ecf95399982019c01223dc130542960a12edfa8edd1122dfa66a8a8", size = 10312166, upload-time = "2026-01-10T06:43:03.673Z" }, + { url = "https://files.pythonhosted.org/packages/04/68/732d4b7811c00775f3bd522a21e8dd5a23f77eb11acdeb663e4a4ebf0ef4/numpy-2.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d797454e37570cfd61143b73b8debd623c3c0952959adb817dd310a483d58a1b", size = 16652495, upload-time = "2026-01-10T06:43:06.283Z" }, + { url = "https://files.pythonhosted.org/packages/20/ca/857722353421a27f1465652b2c66813eeeccea9d76d5f7b74b99f298e60e/numpy-2.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82c55962006156aeef1629b953fd359064aa47e4d82cfc8e67f0918f7da3344f", size = 12368657, upload-time = "2026-01-10T06:43:09.094Z" }, + { url = "https://files.pythonhosted.org/packages/81/0d/2377c917513449cc6240031a79d30eb9a163d32a91e79e0da47c43f2c0c8/numpy-2.4.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:71abbea030f2cfc3092a0ff9f8c8fdefdc5e0bf7d9d9c99663538bb0ecdac0b9", size = 5197256, upload-time = "2026-01-10T06:43:13.634Z" }, + { url = "https://files.pythonhosted.org/packages/17/39/569452228de3f5de9064ac75137082c6214be1f5c532016549a7923ab4b5/numpy-2.4.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5b55aa56165b17aaf15520beb9cbd33c9039810e0d9643dd4379e44294c7303e", size = 6545212, upload-time = "2026-01-10T06:43:15.661Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/77333f4d1e4dac4395385482557aeecf4826e6ff517e32ca48e1dafbe42a/numpy-2.4.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0faba4a331195bfa96f93dd9dfaa10b2c7aa8cda3a02b7fd635e588fe821bf5", size = 14402871, upload-time = "2026-01-10T06:43:17.324Z" }, + { url = "https://files.pythonhosted.org/packages/ba/87/d341e519956273b39d8d47969dd1eaa1af740615394fe67d06f1efa68773/numpy-2.4.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e3087f53e2b4428766b54932644d148613c5a595150533ae7f00dab2f319a8", size = 16359305, upload-time = "2026-01-10T06:43:19.376Z" }, + { url = "https://files.pythonhosted.org/packages/32/91/789132c6666288eaa20ae8066bb99eba1939362e8f1a534949a215246e97/numpy-2.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:49e792ec351315e16da54b543db06ca8a86985ab682602d90c60ef4ff4db2a9c", size = 16181909, upload-time = "2026-01-10T06:43:21.808Z" }, + { url = "https://files.pythonhosted.org/packages/cf/b8/090b8bd27b82a844bb22ff8fdf7935cb1980b48d6e439ae116f53cdc2143/numpy-2.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e9e06c4c2379db47f3f6fc7a8652e7498251789bf8ff5bd43bf478ef314ca2", size = 18284380, upload-time = "2026-01-10T06:43:23.957Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/78/722b62bd31842ff029412271556a1a27a98f45359dea78b1548a3a9996aa/numpy-2.4.1-cp313-cp313-win32.whl", hash = "sha256:3d1a100e48cb266090a031397863ff8a30050ceefd798f686ff92c67a486753d", size = 5957089, upload-time = "2026-01-10T06:43:27.535Z" }, + { url = "https://files.pythonhosted.org/packages/da/a6/cf32198b0b6e18d4fbfa9a21a992a7fca535b9bb2b0cdd217d4a3445b5ca/numpy-2.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:92a0e65272fd60bfa0d9278e0484c2f52fe03b97aedc02b357f33fe752c52ffb", size = 12307230, upload-time = "2026-01-10T06:43:29.298Z" }, + { url = "https://files.pythonhosted.org/packages/44/6c/534d692bfb7d0afe30611320c5fb713659dcb5104d7cc182aff2aea092f5/numpy-2.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:20d4649c773f66cc2fc36f663e091f57c3b7655f936a4c681b4250855d1da8f5", size = 10313125, upload-time = "2026-01-10T06:43:31.782Z" }, + { url = "https://files.pythonhosted.org/packages/da/a1/354583ac5c4caa566de6ddfbc42744409b515039e085fab6e0ff942e0df5/numpy-2.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f93bc6892fe7b0663e5ffa83b61aab510aacffd58c16e012bb9352d489d90cb7", size = 12496156, upload-time = "2026-01-10T06:43:34.237Z" }, + { url = "https://files.pythonhosted.org/packages/51/b0/42807c6e8cce58c00127b1dc24d365305189991f2a7917aa694a109c8d7d/numpy-2.4.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:178de8f87948163d98a4c9ab5bee4ce6519ca918926ec8df195af582de28544d", size = 5324663, upload-time = "2026-01-10T06:43:36.211Z" }, + { url = "https://files.pythonhosted.org/packages/fe/55/7a621694010d92375ed82f312b2f28017694ed784775269115323e37f5e2/numpy-2.4.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:98b35775e03ab7f868908b524fc0a84d38932d8daf7b7e1c3c3a1b6c7a2c9f15", size = 6645224, upload-time = "2026-01-10T06:43:37.884Z" }, + { url = "https://files.pythonhosted.org/packages/50/96/9fa8635ed9d7c847d87e30c834f7109fac5e88549d79ef3324ab5c20919f/numpy-2.4.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:941c2a93313d030f219f3a71fd3d91a728b82979a5e8034eb2e60d394a2b83f9", size = 14462352, upload-time = "2026-01-10T06:43:39.479Z" }, + { url = "https://files.pythonhosted.org/packages/03/d1/8cf62d8bb2062da4fb82dd5d49e47c923f9c0738032f054e0a75342faba7/numpy-2.4.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:529050522e983e00a6c1c6b67411083630de8b57f65e853d7b03d9281b8694d2", size = 16407279, upload-time = "2026-01-10T06:43:41.93Z" }, + { url = "https://files.pythonhosted.org/packages/86/1c/95c86e17c6b0b31ce6ef219da00f71113b220bcb14938c8d9a05cee0ff53/numpy-2.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2302dc0224c1cbc49bb94f7064f3f923a971bfae45c33870dcbff63a2a550505", size = 16248316, upload-time = "2026-01-10T06:43:44.121Z" }, + { url = "https://files.pythonhosted.org/packages/30/b4/e7f5ff8697274c9d0fa82398b6a372a27e5cef069b37df6355ccb1f1db1a/numpy-2.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9171a42fcad32dcf3fa86f0a4faa5e9f8facefdb276f54b8b390d90447cff4e2", size = 18329884, upload-time = "2026-01-10T06:43:46.613Z" }, + { url = "https://files.pythonhosted.org/packages/37/a4/b073f3e9d77f9aec8debe8ca7f9f6a09e888ad1ba7488f0c3b36a94c03ac/numpy-2.4.1-cp313-cp313t-win32.whl", hash = "sha256:382ad67d99ef49024f11d1ce5dcb5ad8432446e4246a4b014418ba3a1175a1f4", size = 6081138, upload-time = "2026-01-10T06:43:48.854Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/16/af42337b53844e67752a092481ab869c0523bc95c4e5c98e4dac4e9581ac/numpy-2.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:62fea415f83ad8fdb6c20840578e5fbaf5ddd65e0ec6c3c47eda0f69da172510", size = 12447478, upload-time = "2026-01-10T06:43:50.476Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f8/fa85b2eac68ec631d0b631abc448552cb17d39afd17ec53dcbcc3537681a/numpy-2.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:a7870e8c5fc11aef57d6fea4b4085e537a3a60ad2cdd14322ed531fdca68d261", size = 10382981, upload-time = "2026-01-10T06:43:52.575Z" }, + { url = "https://files.pythonhosted.org/packages/1b/a7/ef08d25698e0e4b4efbad8d55251d20fe2a15f6d9aa7c9b30cd03c165e6f/numpy-2.4.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:3869ea1ee1a1edc16c29bbe3a2f2a4e515cc3a44d43903ad41e0cacdbaf733dc", size = 16652046, upload-time = "2026-01-10T06:43:54.797Z" }, + { url = "https://files.pythonhosted.org/packages/8f/39/e378b3e3ca13477e5ac70293ec027c438d1927f18637e396fe90b1addd72/numpy-2.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e867df947d427cdd7a60e3e271729090b0f0df80f5f10ab7dd436f40811699c3", size = 12378858, upload-time = "2026-01-10T06:43:57.099Z" }, + { url = "https://files.pythonhosted.org/packages/c3/74/7ec6154f0006910ed1fdbb7591cf4432307033102b8a22041599935f8969/numpy-2.4.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:e3bd2cb07841166420d2fa7146c96ce00cb3410664cbc1a6be028e456c4ee220", size = 5207417, upload-time = "2026-01-10T06:43:59.037Z" }, + { url = "https://files.pythonhosted.org/packages/f7/b7/053ac11820d84e42f8feea5cb81cc4fcd1091499b45b1ed8c7415b1bf831/numpy-2.4.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:f0a90aba7d521e6954670550e561a4cb925713bd944445dbe9e729b71f6cabee", size = 6542643, upload-time = "2026-01-10T06:44:01.852Z" }, + { url = "https://files.pythonhosted.org/packages/c0/c4/2e7908915c0e32ca636b92e4e4a3bdec4cb1e7eb0f8aedf1ed3c68a0d8cd/numpy-2.4.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d558123217a83b2d1ba316b986e9248a1ed1971ad495963d555ccd75dcb1556", size = 14418963, upload-time = "2026-01-10T06:44:04.047Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c0/3ed5083d94e7ffd7c404e54619c088e11f2e1939a9544f5397f4adb1b8ba/numpy-2.4.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2f44de05659b67d20499cbc96d49f2650769afcb398b79b324bb6e297bfe3844", size = 16363811, upload-time = "2026-01-10T06:44:06.207Z" }, + { url = "https://files.pythonhosted.org/packages/0e/68/42b66f1852bf525050a67315a4fb94586ab7e9eaa541b1bef530fab0c5dd/numpy-2.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:69e7419c9012c4aaf695109564e3387f1259f001b4326dfa55907b098af082d3", size = 16197643, upload-time = "2026-01-10T06:44:08.33Z" }, + { url = "https://files.pythonhosted.org/packages/d2/40/e8714fc933d85f82c6bfc7b998a0649ad9769a32f3494ba86598aaf18a48/numpy-2.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd257026eb1b34352e749d7cc1678b5eeec3e329ad8c9965a797e08ccba205", size = 18289601, upload-time = "2026-01-10T06:44:10.841Z" }, + { url = "https://files.pythonhosted.org/packages/80/9a/0d44b468cad50315127e884802351723daca7cf1c98d102929468c81d439/numpy-2.4.1-cp314-cp314-win32.whl", hash = "sha256:727c6c3275ddefa0dc078524a85e064c057b4f4e71ca5ca29a19163c607be745", size = 6005722, upload-time = "2026-01-10T06:44:13.332Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/bb/c6513edcce5a831810e2dddc0d3452ce84d208af92405a0c2e58fd8e7881/numpy-2.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:7d5d7999df434a038d75a748275cd6c0094b0ecdb0837342b332a82defc4dc4d", size = 12438590, upload-time = "2026-01-10T06:44:15.006Z" }, + { url = "https://files.pythonhosted.org/packages/e9/da/a598d5cb260780cf4d255102deba35c1d072dc028c4547832f45dd3323a8/numpy-2.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:ce9ce141a505053b3c7bce3216071f3bf5c182b8b28930f14cd24d43932cd2df", size = 10596180, upload-time = "2026-01-10T06:44:17.386Z" }, + { url = "https://files.pythonhosted.org/packages/de/bc/ea3f2c96fcb382311827231f911723aeff596364eb6e1b6d1d91128aa29b/numpy-2.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:4e53170557d37ae404bf8d542ca5b7c629d6efa1117dac6a83e394142ea0a43f", size = 12498774, upload-time = "2026-01-10T06:44:19.467Z" }, + { url = "https://files.pythonhosted.org/packages/aa/ab/ef9d939fe4a812648c7a712610b2ca6140b0853c5efea361301006c02ae5/numpy-2.4.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:a73044b752f5d34d4232f25f18160a1cc418ea4507f5f11e299d8ac36875f8a0", size = 5327274, upload-time = "2026-01-10T06:44:23.189Z" }, + { url = "https://files.pythonhosted.org/packages/bd/31/d381368e2a95c3b08b8cf7faac6004849e960f4a042d920337f71cef0cae/numpy-2.4.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:fb1461c99de4d040666ca0444057b06541e5642f800b71c56e6ea92d6a853a0c", size = 6648306, upload-time = "2026-01-10T06:44:25.012Z" }, + { url = "https://files.pythonhosted.org/packages/c8/e5/0989b44ade47430be6323d05c23207636d67d7362a1796ccbccac6773dd2/numpy-2.4.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423797bdab2eeefbe608d7c1ec7b2b4fd3c58d51460f1ee26c7500a1d9c9ee93", size = 14464653, upload-time = "2026-01-10T06:44:26.706Z" }, + { url = "https://files.pythonhosted.org/packages/10/a7/cfbe475c35371cae1358e61f20c5f075badc18c4797ab4354140e1d283cf/numpy-2.4.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:52b5f61bdb323b566b528899cc7db2ba5d1015bda7ea811a8bcf3c89c331fa42", size = 16405144, upload-time = "2026-01-10T06:44:29.378Z" }, + { url = "https://files.pythonhosted.org/packages/f8/a3/0c63fe66b534888fa5177cc7cef061541064dbe2b4b60dcc60ffaf0d2157/numpy-2.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42d7dd5fa36d16d52a84f821eb96031836fd405ee6955dd732f2023724d0aa01", size = 16247425, upload-time = "2026-01-10T06:44:31.721Z" }, + { url = "https://files.pythonhosted.org/packages/6b/2b/55d980cfa2c93bd40ff4c290bf824d792bd41d2fe3487b07707559071760/numpy-2.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7b6b5e28bbd47b7532698e5db2fe1db693d84b58c254e4389d99a27bb9b8f6b", size = 18330053, upload-time = "2026-01-10T06:44:34.617Z" }, + { url = "https://files.pythonhosted.org/packages/23/12/8b5fc6b9c487a09a7957188e0943c9ff08432c65e34567cabc1623b03a51/numpy-2.4.1-cp314-cp314t-win32.whl", hash = "sha256:5de60946f14ebe15e713a6f22850c2372fa72f4ff9a432ab44aa90edcadaa65a", size = 6152482, upload-time = "2026-01-10T06:44:36.798Z" }, + { url = "https://files.pythonhosted.org/packages/00/a5/9f8ca5856b8940492fc24fbe13c1bc34d65ddf4079097cf9e53164d094e1/numpy-2.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:8f085da926c0d491ffff3096f91078cc97ea67e7e6b65e490bc8dcda65663be2", size = 12627117, upload-time = "2026-01-10T06:44:38.828Z" }, + { url = 
"https://files.pythonhosted.org/packages/ad/0d/eca3d962f9eef265f01a8e0d20085c6dd1f443cbffc11b6dede81fd82356/numpy-2.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:6436cffb4f2bf26c974344439439c95e152c9a527013f26b3577be6c2ca64295", size = 10667121, upload-time = "2026-01-10T06:44:41.644Z" }, + { url = "https://files.pythonhosted.org/packages/1e/48/d86f97919e79314a1cdee4c832178763e6e98e623e123d0bada19e92c15a/numpy-2.4.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8ad35f20be147a204e28b6a0575fbf3540c5e5f802634d4258d55b1ff5facce1", size = 16822202, upload-time = "2026-01-10T06:44:43.738Z" }, + { url = "https://files.pythonhosted.org/packages/51/e9/1e62a7f77e0f37dcfb0ad6a9744e65df00242b6ea37dfafb55debcbf5b55/numpy-2.4.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8097529164c0f3e32bb89412a0905d9100bf434d9692d9fc275e18dcf53c9344", size = 12569985, upload-time = "2026-01-10T06:44:45.945Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7e/914d54f0c801342306fdcdce3e994a56476f1b818c46c47fc21ae968088c/numpy-2.4.1-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:ea66d2b41ca4a1630aae5507ee0a71647d3124d1741980138aa8f28f44dac36e", size = 5398484, upload-time = "2026-01-10T06:44:48.012Z" }, + { url = "https://files.pythonhosted.org/packages/1c/d8/9570b68584e293a33474e7b5a77ca404f1dcc655e40050a600dee81d27fb/numpy-2.4.1-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d3f8f0df9f4b8be57b3bf74a1d087fec68f927a2fab68231fdb442bf2c12e426", size = 6713216, upload-time = "2026-01-10T06:44:49.725Z" }, + { url = "https://files.pythonhosted.org/packages/33/9b/9dd6e2db8d49eb24f86acaaa5258e5f4c8ed38209a4ee9de2d1a0ca25045/numpy-2.4.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2023ef86243690c2791fd6353e5b4848eedaa88ca8a2d129f462049f6d484696", size = 14538937, upload-time = "2026-01-10T06:44:51.498Z" }, + { url = "https://files.pythonhosted.org/packages/53/87/d5bd995b0f798a37105b876350d346eea5838bd8f77ea3d7a48392f3812b/numpy-2.4.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8361ea4220d763e54cff2fbe7d8c93526b744f7cd9ddab47afeff7e14e8503be", size = 16479830, upload-time = "2026-01-10T06:44:53.931Z" }, + { url = "https://files.pythonhosted.org/packages/5b/c7/b801bf98514b6ae6475e941ac05c58e6411dd863ea92916bfd6d510b08c1/numpy-2.4.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4f1b68ff47680c2925f8063402a693ede215f0257f02596b1318ecdfb1d79e33", size = 12492579, upload-time = "2026-01-10T06:44:57.094Z" }, ] [[package]] @@ -3895,7 +3895,7 @@ wheels = [ [[package]] name = "openai" -version = "2.14.0" +version = "2.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3907,9 +3907,9 @@ dependencies = [ { name = "tqdm", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d8/b1/12fe1c196bea326261718eb037307c1c1fe1dedc2d2d4de777df822e6238/openai-2.14.0.tar.gz", hash = "sha256:419357bedde9402d23bf8f2ee372fca1985a73348debba94bddff06f19459952", size = 626938, upload-time = "2025-12-19T03:28:45.742Z" } +sdist = { url = "https://files.pythonhosted.org/packages/94/f4/4690ecb5d70023ce6bfcfeabfe717020f654bde59a775058ec6ac4692463/openai-2.15.0.tar.gz", 
hash = "sha256:42eb8cbb407d84770633f31bf727d4ffb4138711c670565a41663d9439174fba", size = 627383, upload-time = "2026-01-09T22:10:08.603Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/27/4b/7c1a00c2c3fbd004253937f7520f692a9650767aa73894d7a34f0d65d3f4/openai-2.14.0-py3-none-any.whl", hash = "sha256:7ea40aca4ffc4c4a776e77679021b47eec1160e341f42ae086ba949c9dcc9183", size = 1067558, upload-time = "2025-12-19T03:28:43.727Z" }, + { url = "https://files.pythonhosted.org/packages/b5/df/c306f7375d42bafb379934c2df4c2fa3964656c8c782bac75ee10c102818/openai-2.15.0-py3-none-any.whl", hash = "sha256:6ae23b932cd7230f7244e52954daa6602716d6b9bf235401a107af731baea6c3", size = 1067879, upload-time = "2026-01-09T22:10:06.446Z" }, ] [[package]] @@ -3932,7 +3932,7 @@ wheels = [ [[package]] name = "openai-chatkit" -version = "1.5.0" +version = "1.5.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jinja2", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3941,9 +3941,9 @@ dependencies = [ { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "uvicorn", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a9/0d/b8d9666d5b3fef50b000ff5ba75b6138c729fba8fae79dbce8d3fbd9df66/openai_chatkit-1.5.0.tar.gz", hash = "sha256:17f362d26c2a9bc14c36fcb157768108e3195bf7265a8914507e4aa497133327", size = 58770, upload-time = "2026-01-06T20:33:17.487Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0e/f3/3e7aafd6c29348e60d32082fb14e539661fe4100453a31b34d0fef1ff7b7/openai_chatkit-1.5.2.tar.gz", hash = "sha256:187d27b815f153fa060337c86ee3aab189f72269f23ac2bb2a35c6c88b83846d", size = 59268, upload-time = "2026-01-10T00:59:41.215Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8d/c5/e93fffca480ce0b622ca047a36d3484401ea4f0800e133a5f7fb36ee3ca1/openai_chatkit-1.5.0-py3-none-any.whl", hash = "sha256:0cd22e4b6263d9c001190e22430f5190f7745abbcbbaa47392bd3e5b0c9e79b0", size = 41348, upload-time = "2026-01-06T20:33:16.011Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b6/475a4c723fb2e0de30feea505505eabe77666aa7d81855d356fb289e3d8a/openai_chatkit-1.5.2-py3-none-any.whl", hash = "sha256:3bf3f140f314924ef1d4148ce5174cff6aa4c5d1760f988ba2aa267fd434f960", size = 41482, upload-time = "2026-01-10T00:59:40.023Z" }, ] [[package]] @@ -4210,7 +4210,7 @@ version = "2.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "python-dateutil", marker = "sys_platform == 'darwin' or 
sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytz", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tzdata", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -4277,11 +4277,11 @@ wheels = [ [[package]] name = "pathspec" -version = "1.0.2" +version = "1.0.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/41/b9/6eb731b52f132181a9144bbe77ff82117f6b2d2fbfba49aaab2c014c4760/pathspec-1.0.2.tar.gz", hash = "sha256:fa32b1eb775ed9ba8d599b22c5f906dc098113989da2c00bf8b210078ca7fb92", size = 130502, upload-time = "2026-01-08T04:33:27.613Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/b2/bb8e495d5262bfec41ab5cb18f522f1012933347fb5d9e62452d446baca2/pathspec-1.0.3.tar.gz", hash = "sha256:bac5cf97ae2c2876e2d25ebb15078eb04d76e4b98921ee31c6f85ade8b59444d", size = 130841, upload-time = "2026-01-09T15:46:46.009Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/78/6b/14fc9049d78435fd29e82846c777bd7ed9c470013dc8d0260fff3ff1c11e/pathspec-1.0.2-py3-none-any.whl", hash = "sha256:62f8558917908d237d399b9b338ef455a814801a4688bc41074b25feefd93472", size = 54844, upload-time = "2026-01-08T04:33:26.4Z" }, + { url = "https://files.pythonhosted.org/packages/32/2b/121e912bd60eebd623f873fd090de0e84f322972ab25a7f9044c056804ed/pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c", size = 55021, upload-time = "2026-01-09T15:46:44.652Z" }, ] [[package]] @@ -4447,28 +4447,30 @@ wheels = [ [[package]] name = "polars" -version = "1.36.1" +version = "1.37.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "polars-runtime-32", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9f/dc/56f2a90c79a2cb13f9e956eab6385effe54216ae7a2068b3a6406bae4345/polars-1.36.1.tar.gz", hash = "sha256:12c7616a2305559144711ab73eaa18814f7aa898c522e7645014b68f1432d54c", size = 711993, upload-time = "2025-12-10T01:14:53.033Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/b5/ce40267c54b66f93572d84f7ba1c216b72a71cb2235e3724fab0911541fe/polars-1.37.0.tar.gz", hash = "sha256:6bbbeefb6f02f848d46ad4f4e922a92573986fd38611801c696bae98b02be4c8", size = 715429, upload-time = "2026-01-10T12:28:06.741Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/c6/36a1b874036b49893ecae0ac44a2f63d1a76e6212631a5b2f50a86e0e8af/polars-1.36.1-py3-none-any.whl", hash = "sha256:853c1bbb237add6a5f6d133c15094a9b727d66dd6a4eb91dbb07cdb056b2b8ef", size = 802429, upload-time = "2025-12-10T01:13:53.838Z" }, + { url = "https://files.pythonhosted.org/packages/31/07/d890382bbfdeb25db039ef4a8c8f93b3faf0016e18130513274204954203/polars-1.37.0-py3-none-any.whl", hash = "sha256:fcc549b9923ef1bd6fd99b5fd0a00dfedf85406f4758ae018a69bcd18a91f113", size = 805614, upload-time = "2026-01-10T12:26:47.897Z" }, ] [[package]] name = "polars-runtime-32" -version = "1.36.1" +version = "1.37.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/31/df/597c0ef5eb8d761a16d72327846599b57c5d40d7f9e74306fc154aba8c37/polars_runtime_32-1.36.1.tar.gz", hash = "sha256:201c2cfd80ceb5d5cd7b63085b5fd08d6ae6554f922bcb941035e39638528a09", size = 2788751, upload-time = "2025-12-10T01:14:54.172Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/30/92/b818590a5ebcc55657f5483f26133174bd2b9ca88457b60c93669a9d0c75/polars_runtime_32-1.37.0.tar.gz", hash = "sha256:954ddb056e3a2db2cbcaae501225ac5604d1599b6debd9c6dbdf8efbac0e6511", size = 2820371, upload-time = "2026-01-10T12:28:08.195Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/ea/871129a2d296966c0925b078a9a93c6c5e7facb1c5eebfcd3d5811aeddc1/polars_runtime_32-1.36.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:327b621ca82594f277751f7e23d4b939ebd1be18d54b4cdf7a2f8406cecc18b2", size = 43494311, upload-time = "2025-12-10T01:13:56.096Z" }, - { url = "https://files.pythonhosted.org/packages/d8/76/0038210ad1e526ce5bb2933b13760d6b986b3045eccc1338e661bd656f77/polars_runtime_32-1.36.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:ab0d1f23084afee2b97de8c37aa3e02ec3569749ae39571bd89e7a8b11ae9e83", size = 39300602, upload-time = "2025-12-10T01:13:59.366Z" }, - { url = "https://files.pythonhosted.org/packages/54/1e/2707bee75a780a953a77a2c59829ee90ef55708f02fc4add761c579bf76e/polars_runtime_32-1.36.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:899b9ad2e47ceb31eb157f27a09dbc2047efbf4969a923a6b1ba7f0412c3e64c", size = 44511780, upload-time = "2025-12-10T01:14:02.285Z" }, - { url = "https://files.pythonhosted.org/packages/11/b2/3fede95feee441be64b4bcb32444679a8fbb7a453a10251583053f6efe52/polars_runtime_32-1.36.1-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:d9d077bb9df711bc635a86540df48242bb91975b353e53ef261c6fae6cb0948f", size = 40688448, upload-time = "2025-12-10T01:14:05.131Z" }, - { url = "https://files.pythonhosted.org/packages/05/0f/e629713a72999939b7b4bfdbf030a32794db588b04fdf3dc977dd8ea6c53/polars_runtime_32-1.36.1-cp39-abi3-win_amd64.whl", hash = "sha256:cc17101f28c9a169ff8b5b8d4977a3683cd403621841623825525f440b564cf0", size = 44464898, upload-time = "2025-12-10T01:14:08.296Z" }, - { url = "https://files.pythonhosted.org/packages/d1/d8/a12e6aa14f63784cead437083319ec7cece0d5bb9a5bfe7678cc6578b52a/polars_runtime_32-1.36.1-cp39-abi3-win_arm64.whl", hash = "sha256:809e73857be71250141225ddd5d2b30c97e6340aeaa0d445f930e01bef6888dc", size = 39798896, upload-time = "2025-12-10T01:14:11.568Z" }, + { url = "https://files.pythonhosted.org/packages/f0/67/76162c9fcc71b917bdfd2804eaf0ab7cdb264a89b89af4f195a918f9f97d/polars_runtime_32-1.37.0-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3591f4b8e734126d713a12869d3727360acbbcd1d440b45d830497a317a5a8b3", size = 43518436, upload-time = "2026-01-10T12:26:51.442Z" }, + { url = "https://files.pythonhosted.org/packages/cb/ec/56f328e8fa4ebea453f5bc10c579774dff774a873ff224b3108d53c514f9/polars_runtime_32-1.37.0-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:47849420859159681e94589daad3a04ff66a2379c116ccd812d043f7ffe0094c", size = 39663939, upload-time = "2026-01-10T12:26:54.664Z" }, + { url = "https://files.pythonhosted.org/packages/4c/b2/f1ea0edba327a92ce0158b7a0e4abe21f541e44c9fb8ec932cc47592ca5c/polars_runtime_32-1.37.0-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4648ea1e821b9a841b2a562f27bcf54ff1ad21f9c217adcf0f7d0b3c33dc6400", size = 41481348, upload-time = "2026-01-10T12:26:57.598Z" }, + { url = "https://files.pythonhosted.org/packages/3b/21/788a3dd724bb21cf42e2f4daa6510a47787e8b30dd535aa6cae20ea968d0/polars_runtime_32-1.37.0-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5272b6f1680a3e0d77c9f07cb5a54f307079eb5d519c71aa3c37b9af0ee03a9e", size = 45168069, upload-time = 
"2026-01-10T12:27:00.98Z" }, + { url = "https://files.pythonhosted.org/packages/8a/73/823d6534a20ebdcec4b7706ab2b3f2cfb8e07571305f4e7381cc22d83e31/polars_runtime_32-1.37.0-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:73301ef4fe80d8d748085259a4063ac52ff058088daa702e2a75e7d1ab7f14fc", size = 41675645, upload-time = "2026-01-10T12:27:04.334Z" }, + { url = "https://files.pythonhosted.org/packages/30/54/1bacad96dc2b67d33b886a45b249777212782561493718785cb27c7c362a/polars_runtime_32-1.37.0-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:c60d523d738a7b3660d9abdfaff798f7602488f469d427865965b0bd2e40473a", size = 44737715, upload-time = "2026-01-10T12:27:08.152Z" }, + { url = "https://files.pythonhosted.org/packages/38/e3/aad525d8d89b903fcfa2bd0b4cb66b8a6e83e80b3d1348c5a428092d2983/polars_runtime_32-1.37.0-cp310-abi3-win_amd64.whl", hash = "sha256:f87f76f16e8030d277ecca0c0976aca62ec2b6ba2099ee9c6f75dfc97e7dc1b1", size = 45018403, upload-time = "2026-01-10T12:27:11.292Z" }, + { url = "https://files.pythonhosted.org/packages/0e/4d/ddcaa5f2e18763e02e66d0fd2efca049a42fe96fbeda188e89aeb38dd6fa/polars_runtime_32-1.37.0-cp310-abi3-win_arm64.whl", hash = "sha256:7ffbd9487e3668b0a57519f7ab5ab53ab656086db9f62dceaab41393a07be721", size = 41026243, upload-time = "2026-01-10T12:27:14.563Z" }, ] [[package]] @@ -5284,7 +5286,7 @@ dependencies = [ { name = "grpcio", version = "1.76.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.14' and sys_platform == 'darwin') or (python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')" }, { name = "httpx", extra = ["http2"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "portalocker", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -5315,7 +5317,7 @@ dependencies = [ { name = "jsonpath-ng", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "ml-dtypes", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { 
name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "python-ulid", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -5731,8 +5733,8 @@ resolution-markers = [ ] dependencies = [ { name = "joblib", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, - { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "scipy", version = "1.17.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "threadpoolctl", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0e/d4/40988bf3b8e34feec1d0e6a051446b1f66225f8529b9309becaeef62b6c4/scikit_learn-1.8.0.tar.gz", hash = "sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd", size = 7335585, upload-time = "2025-12-10T07:08:53.618Z" } @@ -5838,7 +5840,7 @@ wheels = [ [[package]] name = "scipy" -version = "1.16.3" +version = "1.17.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -5855,70 +5857,70 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, -] 
-sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/5f/6f37d7439de1455ce9c5a556b8d1db0979f03a796c030bafdf08d35b7bf9/scipy-1.16.3-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:40be6cf99e68b6c4321e9f8782e7d5ff8265af28ef2cd56e9c9b2638fa08ad97", size = 36630881, upload-time = "2025-10-28T17:31:47.104Z" }, - { url = "https://files.pythonhosted.org/packages/7c/89/d70e9f628749b7e4db2aa4cd89735502ff3f08f7b9b27d2e799485987cd9/scipy-1.16.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:8be1ca9170fcb6223cc7c27f4305d680ded114a1567c0bd2bfcbf947d1b17511", size = 28941012, upload-time = "2025-10-28T17:31:53.411Z" }, - { url = "https://files.pythonhosted.org/packages/a8/a8/0e7a9a6872a923505dbdf6bb93451edcac120363131c19013044a1e7cb0c/scipy-1.16.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:bea0a62734d20d67608660f69dcda23e7f90fb4ca20974ab80b6ed40df87a005", size = 20931935, upload-time = "2025-10-28T17:31:57.361Z" }, - { url = "https://files.pythonhosted.org/packages/bd/c7/020fb72bd79ad798e4dbe53938543ecb96b3a9ac3fe274b7189e23e27353/scipy-1.16.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:2a207a6ce9c24f1951241f4693ede2d393f59c07abc159b2cb2be980820e01fb", size = 23534466, upload-time = "2025-10-28T17:32:01.875Z" }, - { url = "https://files.pythonhosted.org/packages/be/a0/668c4609ce6dbf2f948e167836ccaf897f95fb63fa231c87da7558a374cd/scipy-1.16.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:532fb5ad6a87e9e9cd9c959b106b73145a03f04c7d57ea3e6f6bb60b86ab0876", size = 33593618, upload-time = "2025-10-28T17:32:06.902Z" }, - { url = "https://files.pythonhosted.org/packages/ca/6e/8942461cf2636cdae083e3eb72622a7fbbfa5cf559c7d13ab250a5dbdc01/scipy-1.16.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0151a0749efeaaab78711c78422d413c583b8cdd2011a3c1d6c794938ee9fdb2", size = 35899798, upload-time = "2025-10-28T17:32:12.665Z" }, - { url = "https://files.pythonhosted.org/packages/79/e8/d0f33590364cdbd67f28ce79368b373889faa4ee959588beddf6daef9abe/scipy-1.16.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b7180967113560cca57418a7bc719e30366b47959dd845a93206fbed693c867e", size = 36226154, upload-time = "2025-10-28T17:32:17.961Z" }, - { url = "https://files.pythonhosted.org/packages/39/c1/1903de608c0c924a1749c590064e65810f8046e437aba6be365abc4f7557/scipy-1.16.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:deb3841c925eeddb6afc1e4e4a45e418d19ec7b87c5df177695224078e8ec733", size = 38878540, upload-time = "2025-10-28T17:32:23.907Z" }, - { url = "https://files.pythonhosted.org/packages/f1/d0/22ec7036ba0b0a35bccb7f25ab407382ed34af0b111475eb301c16f8a2e5/scipy-1.16.3-cp311-cp311-win_amd64.whl", hash = "sha256:53c3844d527213631e886621df5695d35e4f6a75f620dca412bcd292f6b87d78", size = 38722107, upload-time = "2025-10-28T17:32:29.921Z" }, - { url = "https://files.pythonhosted.org/packages/7b/60/8a00e5a524bb3bf8898db1650d350f50e6cffb9d7a491c561dc9826c7515/scipy-1.16.3-cp311-cp311-win_arm64.whl", hash = "sha256:9452781bd879b14b6f055b26643703551320aa8d79ae064a71df55c00286a184", size = 25506272, upload-time = "2025-10-28T17:32:34.577Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/41/5bf55c3f386b1643812f3a5674edf74b26184378ef0f3e7c7a09a7e2ca7f/scipy-1.16.3-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81fc5827606858cf71446a5e98715ba0e11f0dbc83d71c7409d05486592a45d6", size = 36659043, upload-time = "2025-10-28T17:32:40.285Z" }, - { url = "https://files.pythonhosted.org/packages/1e/0f/65582071948cfc45d43e9870bf7ca5f0e0684e165d7c9ef4e50d783073eb/scipy-1.16.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:c97176013d404c7346bf57874eaac5187d969293bf40497140b0a2b2b7482e07", size = 28898986, upload-time = "2025-10-28T17:32:45.325Z" }, - { url = "https://files.pythonhosted.org/packages/96/5e/36bf3f0ac298187d1ceadde9051177d6a4fe4d507e8f59067dc9dd39e650/scipy-1.16.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2b71d93c8a9936046866acebc915e2af2e292b883ed6e2cbe5c34beb094b82d9", size = 20889814, upload-time = "2025-10-28T17:32:49.277Z" }, - { url = "https://files.pythonhosted.org/packages/80/35/178d9d0c35394d5d5211bbff7ac4f2986c5488b59506fef9e1de13ea28d3/scipy-1.16.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3d4a07a8e785d80289dfe66b7c27d8634a773020742ec7187b85ccc4b0e7b686", size = 23565795, upload-time = "2025-10-28T17:32:53.337Z" }, - { url = "https://files.pythonhosted.org/packages/fa/46/d1146ff536d034d02f83c8afc3c4bab2eddb634624d6529a8512f3afc9da/scipy-1.16.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0553371015692a898e1aa858fed67a3576c34edefa6b7ebdb4e9dde49ce5c203", size = 33349476, upload-time = "2025-10-28T17:32:58.353Z" }, - { url = "https://files.pythonhosted.org/packages/79/2e/415119c9ab3e62249e18c2b082c07aff907a273741b3f8160414b0e9193c/scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:72d1717fd3b5e6ec747327ce9bda32d5463f472c9dce9f54499e81fbd50245a1", size = 35676692, upload-time = "2025-10-28T17:33:03.88Z" }, - { url = "https://files.pythonhosted.org/packages/27/82/df26e44da78bf8d2aeaf7566082260cfa15955a5a6e96e6a29935b64132f/scipy-1.16.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fb2472e72e24d1530debe6ae078db70fb1605350c88a3d14bc401d6306dbffe", size = 36019345, upload-time = "2025-10-28T17:33:09.773Z" }, - { url = "https://files.pythonhosted.org/packages/82/31/006cbb4b648ba379a95c87262c2855cd0d09453e500937f78b30f02fa1cd/scipy-1.16.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c5192722cffe15f9329a3948c4b1db789fbb1f05c97899187dcf009b283aea70", size = 38678975, upload-time = "2025-10-28T17:33:15.809Z" }, - { url = "https://files.pythonhosted.org/packages/c2/7f/acbd28c97e990b421af7d6d6cd416358c9c293fc958b8529e0bd5d2a2a19/scipy-1.16.3-cp312-cp312-win_amd64.whl", hash = "sha256:56edc65510d1331dae01ef9b658d428e33ed48b4f77b1d51caf479a0253f96dc", size = 38555926, upload-time = "2025-10-28T17:33:21.388Z" }, - { url = "https://files.pythonhosted.org/packages/ce/69/c5c7807fd007dad4f48e0a5f2153038dc96e8725d3345b9ee31b2b7bed46/scipy-1.16.3-cp312-cp312-win_arm64.whl", hash = "sha256:a8a26c78ef223d3e30920ef759e25625a0ecdd0d60e5a8818b7513c3e5384cf2", size = 25463014, upload-time = "2025-10-28T17:33:25.975Z" }, - { url = "https://files.pythonhosted.org/packages/72/f1/57e8327ab1508272029e27eeef34f2302ffc156b69e7e233e906c2a5c379/scipy-1.16.3-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:d2ec56337675e61b312179a1ad124f5f570c00f920cc75e1000025451b88241c", size = 36617856, upload-time = "2025-10-28T17:33:31.375Z" }, - { url = 
"https://files.pythonhosted.org/packages/44/13/7e63cfba8a7452eb756306aa2fd9b37a29a323b672b964b4fdeded9a3f21/scipy-1.16.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:16b8bc35a4cc24db80a0ec836a9286d0e31b2503cb2fd7ff7fb0e0374a97081d", size = 28874306, upload-time = "2025-10-28T17:33:36.516Z" }, - { url = "https://files.pythonhosted.org/packages/15/65/3a9400efd0228a176e6ec3454b1fa998fbbb5a8defa1672c3f65706987db/scipy-1.16.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:5803c5fadd29de0cf27fa08ccbfe7a9e5d741bf63e4ab1085437266f12460ff9", size = 20865371, upload-time = "2025-10-28T17:33:42.094Z" }, - { url = "https://files.pythonhosted.org/packages/33/d7/eda09adf009a9fb81827194d4dd02d2e4bc752cef16737cc4ef065234031/scipy-1.16.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:b81c27fc41954319a943d43b20e07c40bdcd3ff7cf013f4fb86286faefe546c4", size = 23524877, upload-time = "2025-10-28T17:33:48.483Z" }, - { url = "https://files.pythonhosted.org/packages/7d/6b/3f911e1ebc364cb81320223a3422aab7d26c9c7973109a9cd0f27c64c6c0/scipy-1.16.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0c3b4dd3d9b08dbce0f3440032c52e9e2ab9f96ade2d3943313dfe51a7056959", size = 33342103, upload-time = "2025-10-28T17:33:56.495Z" }, - { url = "https://files.pythonhosted.org/packages/21/f6/4bfb5695d8941e5c570a04d9fcd0d36bce7511b7d78e6e75c8f9791f82d0/scipy-1.16.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7dc1360c06535ea6116a2220f760ae572db9f661aba2d88074fe30ec2aa1ff88", size = 35697297, upload-time = "2025-10-28T17:34:04.722Z" }, - { url = "https://files.pythonhosted.org/packages/04/e1/6496dadbc80d8d896ff72511ecfe2316b50313bfc3ebf07a3f580f08bd8c/scipy-1.16.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:663b8d66a8748051c3ee9c96465fb417509315b99c71550fda2591d7dd634234", size = 36021756, upload-time = "2025-10-28T17:34:13.482Z" }, - { url = "https://files.pythonhosted.org/packages/fe/bd/a8c7799e0136b987bda3e1b23d155bcb31aec68a4a472554df5f0937eef7/scipy-1.16.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eab43fae33a0c39006a88096cd7b4f4ef545ea0447d250d5ac18202d40b6611d", size = 38696566, upload-time = "2025-10-28T17:34:22.384Z" }, - { url = "https://files.pythonhosted.org/packages/cd/01/1204382461fcbfeb05b6161b594f4007e78b6eba9b375382f79153172b4d/scipy-1.16.3-cp313-cp313-win_amd64.whl", hash = "sha256:062246acacbe9f8210de8e751b16fc37458213f124bef161a5a02c7a39284304", size = 38529877, upload-time = "2025-10-28T17:35:51.076Z" }, - { url = "https://files.pythonhosted.org/packages/7f/14/9d9fbcaa1260a94f4bb5b64ba9213ceb5d03cd88841fe9fd1ffd47a45b73/scipy-1.16.3-cp313-cp313-win_arm64.whl", hash = "sha256:50a3dbf286dbc7d84f176f9a1574c705f277cb6565069f88f60db9eafdbe3ee2", size = 25455366, upload-time = "2025-10-28T17:35:59.014Z" }, - { url = "https://files.pythonhosted.org/packages/e2/a3/9ec205bd49f42d45d77f1730dbad9ccf146244c1647605cf834b3a8c4f36/scipy-1.16.3-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:fb4b29f4cf8cc5a8d628bc8d8e26d12d7278cd1f219f22698a378c3d67db5e4b", size = 37027931, upload-time = "2025-10-28T17:34:31.451Z" }, - { url = "https://files.pythonhosted.org/packages/25/06/ca9fd1f3a4589cbd825b1447e5db3a8ebb969c1eaf22c8579bd286f51b6d/scipy-1.16.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:8d09d72dc92742988b0e7750bddb8060b0c7079606c0d24a8cc8e9c9c11f9079", size = 29400081, upload-time = "2025-10-28T17:34:39.087Z" }, - { url = 
"https://files.pythonhosted.org/packages/6a/56/933e68210d92657d93fb0e381683bc0e53a965048d7358ff5fbf9e6a1b17/scipy-1.16.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:03192a35e661470197556de24e7cb1330d84b35b94ead65c46ad6f16f6b28f2a", size = 21391244, upload-time = "2025-10-28T17:34:45.234Z" }, - { url = "https://files.pythonhosted.org/packages/a8/7e/779845db03dc1418e215726329674b40576879b91814568757ff0014ad65/scipy-1.16.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:57d01cb6f85e34f0946b33caa66e892aae072b64b034183f3d87c4025802a119", size = 23929753, upload-time = "2025-10-28T17:34:51.793Z" }, - { url = "https://files.pythonhosted.org/packages/4c/4b/f756cf8161d5365dcdef9e5f460ab226c068211030a175d2fc7f3f41ca64/scipy-1.16.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:96491a6a54e995f00a28a3c3badfff58fd093bf26cd5fb34a2188c8c756a3a2c", size = 33496912, upload-time = "2025-10-28T17:34:59.8Z" }, - { url = "https://files.pythonhosted.org/packages/09/b5/222b1e49a58668f23839ca1542a6322bb095ab8d6590d4f71723869a6c2c/scipy-1.16.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cd13e354df9938598af2be05822c323e97132d5e6306b83a3b4ee6724c6e522e", size = 35802371, upload-time = "2025-10-28T17:35:08.173Z" }, - { url = "https://files.pythonhosted.org/packages/c1/8d/5964ef68bb31829bde27611f8c9deeac13764589fe74a75390242b64ca44/scipy-1.16.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63d3cdacb8a824a295191a723ee5e4ea7768ca5ca5f2838532d9f2e2b3ce2135", size = 36190477, upload-time = "2025-10-28T17:35:16.7Z" }, - { url = "https://files.pythonhosted.org/packages/ab/f2/b31d75cb9b5fa4dd39a0a931ee9b33e7f6f36f23be5ef560bf72e0f92f32/scipy-1.16.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e7efa2681ea410b10dde31a52b18b0154d66f2485328830e45fdf183af5aefc6", size = 38796678, upload-time = "2025-10-28T17:35:26.354Z" }, - { url = "https://files.pythonhosted.org/packages/b4/1e/b3723d8ff64ab548c38d87055483714fefe6ee20e0189b62352b5e015bb1/scipy-1.16.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2d1ae2cf0c350e7705168ff2429962a89ad90c2d49d1dd300686d8b2a5af22fc", size = 38640178, upload-time = "2025-10-28T17:35:35.304Z" }, - { url = "https://files.pythonhosted.org/packages/8e/f3/d854ff38789aca9b0cc23008d607ced9de4f7ab14fa1ca4329f86b3758ca/scipy-1.16.3-cp313-cp313t-win_arm64.whl", hash = "sha256:0c623a54f7b79dd88ef56da19bc2873afec9673a48f3b85b18e4d402bdd29a5a", size = 25803246, upload-time = "2025-10-28T17:35:42.155Z" }, - { url = "https://files.pythonhosted.org/packages/99/f6/99b10fd70f2d864c1e29a28bbcaa0c6340f9d8518396542d9ea3b4aaae15/scipy-1.16.3-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:875555ce62743e1d54f06cdf22c1e0bc47b91130ac40fe5d783b6dfa114beeb6", size = 36606469, upload-time = "2025-10-28T17:36:08.741Z" }, - { url = "https://files.pythonhosted.org/packages/4d/74/043b54f2319f48ea940dd025779fa28ee360e6b95acb7cd188fad4391c6b/scipy-1.16.3-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:bb61878c18a470021fb515a843dc7a76961a8daceaaaa8bad1332f1bf4b54657", size = 28872043, upload-time = "2025-10-28T17:36:16.599Z" }, - { url = "https://files.pythonhosted.org/packages/4d/e1/24b7e50cc1c4ee6ffbcb1f27fe9f4c8b40e7911675f6d2d20955f41c6348/scipy-1.16.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:f2622206f5559784fa5c4b53a950c3c7c1cf3e84ca1b9c4b6c03f062f289ca26", size = 20862952, upload-time = "2025-10-28T17:36:22.966Z" }, - { url = 
"https://files.pythonhosted.org/packages/dd/3a/3e8c01a4d742b730df368e063787c6808597ccb38636ed821d10b39ca51b/scipy-1.16.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7f68154688c515cdb541a31ef8eb66d8cd1050605be9dcd74199cbd22ac739bc", size = 23508512, upload-time = "2025-10-28T17:36:29.731Z" }, - { url = "https://files.pythonhosted.org/packages/1f/60/c45a12b98ad591536bfe5330cb3cfe1850d7570259303563b1721564d458/scipy-1.16.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8b3c820ddb80029fe9f43d61b81d8b488d3ef8ca010d15122b152db77dc94c22", size = 33413639, upload-time = "2025-10-28T17:36:37.982Z" }, - { url = "https://files.pythonhosted.org/packages/71/bc/35957d88645476307e4839712642896689df442f3e53b0fa016ecf8a3357/scipy-1.16.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d3837938ae715fc0fe3c39c0202de3a8853aff22ca66781ddc2ade7554b7e2cc", size = 35704729, upload-time = "2025-10-28T17:36:46.547Z" }, - { url = "https://files.pythonhosted.org/packages/3b/15/89105e659041b1ca11c386e9995aefacd513a78493656e57789f9d9eab61/scipy-1.16.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aadd23f98f9cb069b3bd64ddc900c4d277778242e961751f77a8cb5c4b946fb0", size = 36086251, upload-time = "2025-10-28T17:36:55.161Z" }, - { url = "https://files.pythonhosted.org/packages/1a/87/c0ea673ac9c6cc50b3da2196d860273bc7389aa69b64efa8493bdd25b093/scipy-1.16.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b7c5f1bda1354d6a19bc6af73a649f8285ca63ac6b52e64e658a5a11d4d69800", size = 38716681, upload-time = "2025-10-28T17:37:04.1Z" }, - { url = "https://files.pythonhosted.org/packages/91/06/837893227b043fb9b0d13e4bd7586982d8136cb249ffb3492930dab905b8/scipy-1.16.3-cp314-cp314-win_amd64.whl", hash = "sha256:e5d42a9472e7579e473879a1990327830493a7047506d58d73fc429b84c1d49d", size = 39358423, upload-time = "2025-10-28T17:38:20.005Z" }, - { url = "https://files.pythonhosted.org/packages/95/03/28bce0355e4d34a7c034727505a02d19548549e190bedd13a721e35380b7/scipy-1.16.3-cp314-cp314-win_arm64.whl", hash = "sha256:6020470b9d00245926f2d5bb93b119ca0340f0d564eb6fbaad843eaebf9d690f", size = 26135027, upload-time = "2025-10-28T17:38:24.966Z" }, - { url = "https://files.pythonhosted.org/packages/b2/6f/69f1e2b682efe9de8fe9f91040f0cd32f13cfccba690512ba4c582b0bc29/scipy-1.16.3-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:e1d27cbcb4602680a49d787d90664fa4974063ac9d4134813332a8c53dbe667c", size = 37028379, upload-time = "2025-10-28T17:37:14.061Z" }, - { url = "https://files.pythonhosted.org/packages/7c/2d/e826f31624a5ebbab1cd93d30fd74349914753076ed0593e1d56a98c4fb4/scipy-1.16.3-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:9b9c9c07b6d56a35777a1b4cc8966118fb16cfd8daf6743867d17d36cfad2d40", size = 29400052, upload-time = "2025-10-28T17:37:21.709Z" }, - { url = "https://files.pythonhosted.org/packages/69/27/d24feb80155f41fd1f156bf144e7e049b4e2b9dd06261a242905e3bc7a03/scipy-1.16.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:3a4c460301fb2cffb7f88528f30b3127742cff583603aa7dc964a52c463b385d", size = 21391183, upload-time = "2025-10-28T17:37:29.559Z" }, - { url = "https://files.pythonhosted.org/packages/f8/d3/1b229e433074c5738a24277eca520a2319aac7465eea7310ea6ae0e98ae2/scipy-1.16.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:f667a4542cc8917af1db06366d3f78a5c8e83badd56409f94d1eac8d8d9133fa", size = 23930174, upload-time = "2025-10-28T17:37:36.306Z" }, - { url = 
"https://files.pythonhosted.org/packages/16/9d/d9e148b0ec680c0f042581a2be79a28a7ab66c0c4946697f9e7553ead337/scipy-1.16.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f379b54b77a597aa7ee5e697df0d66903e41b9c85a6dd7946159e356319158e8", size = 33497852, upload-time = "2025-10-28T17:37:42.228Z" }, - { url = "https://files.pythonhosted.org/packages/2f/22/4e5f7561e4f98b7bea63cf3fd7934bff1e3182e9f1626b089a679914d5c8/scipy-1.16.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4aff59800a3b7f786b70bfd6ab551001cb553244988d7d6b8299cb1ea653b353", size = 35798595, upload-time = "2025-10-28T17:37:48.102Z" }, - { url = "https://files.pythonhosted.org/packages/83/42/6644d714c179429fc7196857866f219fef25238319b650bb32dde7bf7a48/scipy-1.16.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:da7763f55885045036fabcebd80144b757d3db06ab0861415d1c3b7c69042146", size = 36186269, upload-time = "2025-10-28T17:37:53.72Z" }, - { url = "https://files.pythonhosted.org/packages/ac/70/64b4d7ca92f9cf2e6fc6aaa2eecf80bb9b6b985043a9583f32f8177ea122/scipy-1.16.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ffa6eea95283b2b8079b821dc11f50a17d0571c92b43e2b5b12764dc5f9b285d", size = 38802779, upload-time = "2025-10-28T17:37:59.393Z" }, - { url = "https://files.pythonhosted.org/packages/61/82/8d0e39f62764cce5ffd5284131e109f07cf8955aef9ab8ed4e3aa5e30539/scipy-1.16.3-cp314-cp314t-win_amd64.whl", hash = "sha256:d9f48cafc7ce94cf9b15c6bffdc443a81a27bf7075cf2dcd5c8b40f85d10c4e7", size = 39471128, upload-time = "2025-10-28T17:38:05.259Z" }, - { url = "https://files.pythonhosted.org/packages/64/47/a494741db7280eae6dc033510c319e34d42dd41b7ac0c7ead39354d1a2b5/scipy-1.16.3-cp314-cp314t-win_arm64.whl", hash = "sha256:21d9d6b197227a12dcbf9633320a4e34c6b0e51c57268df255a0942983bac562", size = 26464127, upload-time = "2025-10-28T17:38:11.34Z" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/3e/9cca699f3486ce6bc12ff46dc2031f1ec8eb9ccc9a320fdaf925f1417426/scipy-1.17.0.tar.gz", hash = "sha256:2591060c8e648d8b96439e111ac41fd8342fdeff1876be2e19dea3fe8930454e", size = 30396830, upload-time = "2026-01-10T21:34:23.009Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/4b/c89c131aa87cad2b77a54eb0fb94d633a842420fa7e919dc2f922037c3d8/scipy-1.17.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:2abd71643797bd8a106dff97894ff7869eeeb0af0f7a5ce02e4227c6a2e9d6fd", size = 31381316, upload-time = "2026-01-10T21:24:33.42Z" }, + { url = "https://files.pythonhosted.org/packages/5e/5f/a6b38f79a07d74989224d5f11b55267714707582908a5f1ae854cf9a9b84/scipy-1.17.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:ef28d815f4d2686503e5f4f00edc387ae58dfd7a2f42e348bb53359538f01558", size = 27966760, upload-time = "2026-01-10T21:24:38.911Z" }, + { url = "https://files.pythonhosted.org/packages/c1/20/095ad24e031ee8ed3c5975954d816b8e7e2abd731e04f8be573de8740885/scipy-1.17.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:272a9f16d6bb4667e8b50d25d71eddcc2158a214df1b566319298de0939d2ab7", size = 20138701, upload-time = "2026-01-10T21:24:43.249Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/11/4aad2b3858d0337756f3323f8960755704e530b27eb2a94386c970c32cbe/scipy-1.17.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:7204fddcbec2fe6598f1c5fdf027e9f259106d05202a959a9f1aecf036adc9f6", size = 22480574, upload-time = "2026-01-10T21:24:47.266Z" }, + { url = "https://files.pythonhosted.org/packages/85/bd/f5af70c28c6da2227e510875cadf64879855193a687fb19951f0f44cfd6b/scipy-1.17.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc02c37a5639ee67d8fb646ffded6d793c06c5622d36b35cfa8fe5ececb8f042", size = 32862414, upload-time = "2026-01-10T21:24:52.566Z" }, + { url = "https://files.pythonhosted.org/packages/ef/df/df1457c4df3826e908879fe3d76bc5b6e60aae45f4ee42539512438cfd5d/scipy-1.17.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dac97a27520d66c12a34fd90a4fe65f43766c18c0d6e1c0a80f114d2260080e4", size = 35112380, upload-time = "2026-01-10T21:24:58.433Z" }, + { url = "https://files.pythonhosted.org/packages/5f/bb/88e2c16bd1dd4de19d80d7c5e238387182993c2fb13b4b8111e3927ad422/scipy-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb7446a39b3ae0fe8f416a9a3fdc6fba3f11c634f680f16a239c5187bc487c0", size = 34922676, upload-time = "2026-01-10T21:25:04.287Z" }, + { url = "https://files.pythonhosted.org/packages/02/ba/5120242cc735f71fc002cff0303d536af4405eb265f7c60742851e7ccfe9/scipy-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:474da16199f6af66601a01546144922ce402cb17362e07d82f5a6cf8f963e449", size = 37507599, upload-time = "2026-01-10T21:25:09.851Z" }, + { url = "https://files.pythonhosted.org/packages/52/c8/08629657ac6c0da198487ce8cd3de78e02cfde42b7f34117d56a3fe249dc/scipy-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:255c0da161bd7b32a6c898e7891509e8a9289f0b1c6c7d96142ee0d2b114c2ea", size = 36380284, upload-time = "2026-01-10T21:25:15.632Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4a/465f96d42c6f33ad324a40049dfd63269891db9324aa66c4a1c108c6f994/scipy-1.17.0-cp311-cp311-win_arm64.whl", hash = "sha256:85b0ac3ad17fa3be50abd7e69d583d98792d7edc08367e01445a1e2076005379", size = 24370427, upload-time = "2026-01-10T21:25:20.514Z" }, + { url = "https://files.pythonhosted.org/packages/0b/11/7241a63e73ba5a516f1930ac8d5b44cbbfabd35ac73a2d08ca206df007c4/scipy-1.17.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:0d5018a57c24cb1dd828bcf51d7b10e65986d549f52ef5adb6b4d1ded3e32a57", size = 31364580, upload-time = "2026-01-10T21:25:25.717Z" }, + { url = "https://files.pythonhosted.org/packages/ed/1d/5057f812d4f6adc91a20a2d6f2ebcdb517fdbc87ae3acc5633c9b97c8ba5/scipy-1.17.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:88c22af9e5d5a4f9e027e26772cc7b5922fab8bcc839edb3ae33de404feebd9e", size = 27969012, upload-time = "2026-01-10T21:25:30.921Z" }, + { url = "https://files.pythonhosted.org/packages/e3/21/f6ec556c1e3b6ec4e088da667d9987bb77cc3ab3026511f427dc8451187d/scipy-1.17.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f3cd947f20fe17013d401b64e857c6b2da83cae567adbb75b9dcba865abc66d8", size = 20140691, upload-time = "2026-01-10T21:25:34.802Z" }, + { url = "https://files.pythonhosted.org/packages/7a/fe/5e5ad04784964ba964a96f16c8d4676aa1b51357199014dce58ab7ec5670/scipy-1.17.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:e8c0b331c2c1f531eb51f1b4fc9ba709521a712cce58f1aa627bc007421a5306", size = 22463015, upload-time = "2026-01-10T21:25:39.277Z" }, + { url = 
"https://files.pythonhosted.org/packages/4a/69/7c347e857224fcaf32a34a05183b9d8a7aca25f8f2d10b8a698b8388561a/scipy-1.17.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5194c445d0a1c7a6c1a4a4681b6b7c71baad98ff66d96b949097e7513c9d6742", size = 32724197, upload-time = "2026-01-10T21:25:44.084Z" }, + { url = "https://files.pythonhosted.org/packages/d1/fe/66d73b76d378ba8cc2fe605920c0c75092e3a65ae746e1e767d9d020a75a/scipy-1.17.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9eeb9b5f5997f75507814ed9d298ab23f62cf79f5a3ef90031b1ee2506abdb5b", size = 35009148, upload-time = "2026-01-10T21:25:50.591Z" }, + { url = "https://files.pythonhosted.org/packages/af/07/07dec27d9dc41c18d8c43c69e9e413431d20c53a0339c388bcf72f353c4b/scipy-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:40052543f7bbe921df4408f46003d6f01c6af109b9e2c8a66dd1cf6cf57f7d5d", size = 34798766, upload-time = "2026-01-10T21:25:59.41Z" }, + { url = "https://files.pythonhosted.org/packages/81/61/0470810c8a093cdacd4ba7504b8a218fd49ca070d79eca23a615f5d9a0b0/scipy-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0cf46c8013fec9d3694dc572f0b54100c28405d55d3e2cb15e2895b25057996e", size = 37405953, upload-time = "2026-01-10T21:26:07.75Z" }, + { url = "https://files.pythonhosted.org/packages/92/ce/672ed546f96d5d41ae78c4b9b02006cedd0b3d6f2bf5bb76ea455c320c28/scipy-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:0937a0b0d8d593a198cededd4c439a0ea216a3f36653901ea1f3e4be949056f8", size = 36328121, upload-time = "2026-01-10T21:26:16.509Z" }, + { url = "https://files.pythonhosted.org/packages/9d/21/38165845392cae67b61843a52c6455d47d0cc2a40dd495c89f4362944654/scipy-1.17.0-cp312-cp312-win_arm64.whl", hash = "sha256:f603d8a5518c7426414d1d8f82e253e454471de682ce5e39c29adb0df1efb86b", size = 24314368, upload-time = "2026-01-10T21:26:23.087Z" }, + { url = "https://files.pythonhosted.org/packages/0c/51/3468fdfd49387ddefee1636f5cf6d03ce603b75205bf439bbf0e62069bfd/scipy-1.17.0-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:65ec32f3d32dfc48c72df4291345dae4f048749bc8d5203ee0a3f347f96c5ce6", size = 31344101, upload-time = "2026-01-10T21:26:30.25Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9a/9406aec58268d437636069419e6977af953d1e246df941d42d3720b7277b/scipy-1.17.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:1f9586a58039d7229ce77b52f8472c972448cded5736eaf102d5658bbac4c269", size = 27950385, upload-time = "2026-01-10T21:26:36.801Z" }, + { url = "https://files.pythonhosted.org/packages/4f/98/e7342709e17afdfd1b26b56ae499ef4939b45a23a00e471dfb5375eea205/scipy-1.17.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:9fad7d3578c877d606b1150135c2639e9de9cecd3705caa37b66862977cc3e72", size = 20122115, upload-time = "2026-01-10T21:26:42.107Z" }, + { url = "https://files.pythonhosted.org/packages/fd/0e/9eeeb5357a64fd157cbe0302c213517c541cc16b8486d82de251f3c68ede/scipy-1.17.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:423ca1f6584fc03936972b5f7c06961670dbba9f234e71676a7c7ccf938a0d61", size = 22442402, upload-time = "2026-01-10T21:26:48.029Z" }, + { url = "https://files.pythonhosted.org/packages/c9/10/be13397a0e434f98e0c79552b2b584ae5bb1c8b2be95db421533bbca5369/scipy-1.17.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fe508b5690e9eaaa9467fc047f833af58f1152ae51a0d0aed67aa5801f4dd7d6", size = 32696338, upload-time = "2026-01-10T21:26:55.521Z" }, + { url = 
"https://files.pythonhosted.org/packages/63/1e/12fbf2a3bb240161651c94bb5cdd0eae5d4e8cc6eaeceb74ab07b12a753d/scipy-1.17.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6680f2dfd4f6182e7d6db161344537da644d1cf85cf293f015c60a17ecf08752", size = 34977201, upload-time = "2026-01-10T21:27:03.501Z" }, + { url = "https://files.pythonhosted.org/packages/19/5b/1a63923e23ccd20bd32156d7dd708af5bbde410daa993aa2500c847ab2d2/scipy-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eec3842ec9ac9de5917899b277428886042a93db0b227ebbe3a333b64ec7643d", size = 34777384, upload-time = "2026-01-10T21:27:11.423Z" }, + { url = "https://files.pythonhosted.org/packages/39/22/b5da95d74edcf81e540e467202a988c50fef41bd2011f46e05f72ba07df6/scipy-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d7425fcafbc09a03731e1bc05581f5fad988e48c6a861f441b7ab729a49a55ea", size = 37379586, upload-time = "2026-01-10T21:27:20.171Z" }, + { url = "https://files.pythonhosted.org/packages/b9/b6/8ac583d6da79e7b9e520579f03007cb006f063642afd6b2eeb16b890bf93/scipy-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:87b411e42b425b84777718cc41516b8a7e0795abfa8e8e1d573bf0ef014f0812", size = 36287211, upload-time = "2026-01-10T21:28:43.122Z" }, + { url = "https://files.pythonhosted.org/packages/55/fb/7db19e0b3e52f882b420417644ec81dd57eeef1bd1705b6f689d8ff93541/scipy-1.17.0-cp313-cp313-win_arm64.whl", hash = "sha256:357ca001c6e37601066092e7c89cca2f1ce74e2a520ca78d063a6d2201101df2", size = 24312646, upload-time = "2026-01-10T21:28:49.893Z" }, + { url = "https://files.pythonhosted.org/packages/20/b6/7feaa252c21cc7aff335c6c55e1b90ab3e3306da3f048109b8b639b94648/scipy-1.17.0-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:ec0827aa4d36cb79ff1b81de898e948a51ac0b9b1c43e4a372c0508c38c0f9a3", size = 31693194, upload-time = "2026-01-10T21:27:27.454Z" }, + { url = "https://files.pythonhosted.org/packages/76/bb/bbb392005abce039fb7e672cb78ac7d158700e826b0515cab6b5b60c26fb/scipy-1.17.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:819fc26862b4b3c73a60d486dbb919202f3d6d98c87cf20c223511429f2d1a97", size = 28365415, upload-time = "2026-01-10T21:27:34.26Z" }, + { url = "https://files.pythonhosted.org/packages/37/da/9d33196ecc99fba16a409c691ed464a3a283ac454a34a13a3a57c0d66f3a/scipy-1.17.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:363ad4ae2853d88ebcde3ae6ec46ccca903ea9835ee8ba543f12f575e7b07e4e", size = 20537232, upload-time = "2026-01-10T21:27:40.306Z" }, + { url = "https://files.pythonhosted.org/packages/56/9d/f4b184f6ddb28e9a5caea36a6f98e8ecd2a524f9127354087ce780885d83/scipy-1.17.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:979c3a0ff8e5ba254d45d59ebd38cde48fce4f10b5125c680c7a4bfe177aab07", size = 22791051, upload-time = "2026-01-10T21:27:46.539Z" }, + { url = "https://files.pythonhosted.org/packages/9b/9d/025cccdd738a72140efc582b1641d0dd4caf2e86c3fb127568dc80444e6e/scipy-1.17.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:130d12926ae34399d157de777472bf82e9061c60cc081372b3118edacafe1d00", size = 32815098, upload-time = "2026-01-10T21:27:54.389Z" }, + { url = "https://files.pythonhosted.org/packages/48/5f/09b879619f8bca15ce392bfc1894bd9c54377e01d1b3f2f3b595a1b4d945/scipy-1.17.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e886000eb4919eae3a44f035e63f0fd8b651234117e8f6f29bad1cd26e7bc45", size = 35031342, upload-time = "2026-01-10T21:28:03.012Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/9a/f0f0a9f0aa079d2f106555b984ff0fbb11a837df280f04f71f056ea9c6e4/scipy-1.17.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:13c4096ac6bc31d706018f06a49abe0485f96499deb82066b94d19b02f664209", size = 34893199, upload-time = "2026-01-10T21:28:10.832Z" }, + { url = "https://files.pythonhosted.org/packages/90/b8/4f0f5cf0c5ea4d7548424e6533e6b17d164f34a6e2fb2e43ffebb6697b06/scipy-1.17.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cacbaddd91fcffde703934897c5cd2c7cb0371fac195d383f4e1f1c5d3f3bd04", size = 37438061, upload-time = "2026-01-10T21:28:19.684Z" }, + { url = "https://files.pythonhosted.org/packages/f9/cc/2bd59140ed3b2fa2882fb15da0a9cb1b5a6443d67cfd0d98d4cec83a57ec/scipy-1.17.0-cp313-cp313t-win_amd64.whl", hash = "sha256:edce1a1cf66298cccdc48a1bdf8fb10a3bf58e8b58d6c3883dd1530e103f87c0", size = 36328593, upload-time = "2026-01-10T21:28:28.007Z" }, + { url = "https://files.pythonhosted.org/packages/13/1b/c87cc44a0d2c7aaf0f003aef2904c3d097b422a96c7e7c07f5efd9073c1b/scipy-1.17.0-cp313-cp313t-win_arm64.whl", hash = "sha256:30509da9dbec1c2ed8f168b8d8aa853bc6723fede1dbc23c7d43a56f5ab72a67", size = 24625083, upload-time = "2026-01-10T21:28:35.188Z" }, + { url = "https://files.pythonhosted.org/packages/1a/2d/51006cd369b8e7879e1c630999a19d1fbf6f8b5ed3e33374f29dc87e53b3/scipy-1.17.0-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:c17514d11b78be8f7e6331b983a65a7f5ca1fd037b95e27b280921fe5606286a", size = 31346803, upload-time = "2026-01-10T21:28:57.24Z" }, + { url = "https://files.pythonhosted.org/packages/d6/2e/2349458c3ce445f53a6c93d4386b1c4c5c0c540917304c01222ff95ff317/scipy-1.17.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:4e00562e519c09da34c31685f6acc3aa384d4d50604db0f245c14e1b4488bfa2", size = 27967182, upload-time = "2026-01-10T21:29:04.107Z" }, + { url = "https://files.pythonhosted.org/packages/5e/7c/df525fbfa77b878d1cfe625249529514dc02f4fd5f45f0f6295676a76528/scipy-1.17.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:f7df7941d71314e60a481e02d5ebcb3f0185b8d799c70d03d8258f6c80f3d467", size = 20139125, upload-time = "2026-01-10T21:29:10.179Z" }, + { url = "https://files.pythonhosted.org/packages/33/11/fcf9d43a7ed1234d31765ec643b0515a85a30b58eddccc5d5a4d12b5f194/scipy-1.17.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:aabf057c632798832f071a8dde013c2e26284043934f53b00489f1773b33527e", size = 22443554, upload-time = "2026-01-10T21:29:15.888Z" }, + { url = "https://files.pythonhosted.org/packages/80/5c/ea5d239cda2dd3d31399424967a24d556cf409fbea7b5b21412b0fd0a44f/scipy-1.17.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a38c3337e00be6fd8a95b4ed66b5d988bac4ec888fd922c2ea9fe5fb1603dd67", size = 32757834, upload-time = "2026-01-10T21:29:23.406Z" }, + { url = "https://files.pythonhosted.org/packages/b8/7e/8c917cc573310e5dc91cbeead76f1b600d3fb17cf0969db02c9cf92e3cfa/scipy-1.17.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00fb5f8ec8398ad90215008d8b6009c9db9fa924fd4c7d6be307c6f945f9cd73", size = 34995775, upload-time = "2026-01-10T21:29:31.915Z" }, + { url = "https://files.pythonhosted.org/packages/c5/43/176c0c3c07b3f7df324e7cdd933d3e2c4898ca202b090bd5ba122f9fe270/scipy-1.17.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f2a4942b0f5f7c23c7cd641a0ca1955e2ae83dedcff537e3a0259096635e186b", size = 34841240, upload-time = "2026-01-10T21:29:39.995Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/8c/d1f5f4b491160592e7f084d997de53a8e896a3ac01cd07e59f43ca222744/scipy-1.17.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:dbf133ced83889583156566d2bdf7a07ff89228fe0c0cb727f777de92092ec6b", size = 37394463, upload-time = "2026-01-10T21:29:48.723Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ec/42a6657f8d2d087e750e9a5dde0b481fd135657f09eaf1cf5688bb23c338/scipy-1.17.0-cp314-cp314-win_amd64.whl", hash = "sha256:3625c631a7acd7cfd929e4e31d2582cf00f42fcf06011f59281271746d77e061", size = 37053015, upload-time = "2026-01-10T21:30:51.418Z" }, + { url = "https://files.pythonhosted.org/packages/27/58/6b89a6afd132787d89a362d443a7bddd511b8f41336a1ae47f9e4f000dc4/scipy-1.17.0-cp314-cp314-win_arm64.whl", hash = "sha256:9244608d27eafe02b20558523ba57f15c689357c85bdcfe920b1828750aa26eb", size = 24951312, upload-time = "2026-01-10T21:30:56.771Z" }, + { url = "https://files.pythonhosted.org/packages/e9/01/f58916b9d9ae0112b86d7c3b10b9e685625ce6e8248df139d0fcb17f7397/scipy-1.17.0-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:2b531f57e09c946f56ad0b4a3b2abee778789097871fc541e267d2eca081cff1", size = 31706502, upload-time = "2026-01-10T21:29:56.326Z" }, + { url = "https://files.pythonhosted.org/packages/59/8e/2912a87f94a7d1f8b38aabc0faf74b82d3b6c9e22be991c49979f0eceed8/scipy-1.17.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:13e861634a2c480bd237deb69333ac79ea1941b94568d4b0efa5db5e263d4fd1", size = 28380854, upload-time = "2026-01-10T21:30:01.554Z" }, + { url = "https://files.pythonhosted.org/packages/bd/1c/874137a52dddab7d5d595c1887089a2125d27d0601fce8c0026a24a92a0b/scipy-1.17.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:eb2651271135154aa24f6481cbae5cc8af1f0dd46e6533fb7b56aa9727b6a232", size = 20552752, upload-time = "2026-01-10T21:30:05.93Z" }, + { url = "https://files.pythonhosted.org/packages/3f/f0/7518d171cb735f6400f4576cf70f756d5b419a07fe1867da34e2c2c9c11b/scipy-1.17.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:c5e8647f60679790c2f5c76be17e2e9247dc6b98ad0d3b065861e082c56e078d", size = 22803972, upload-time = "2026-01-10T21:30:10.651Z" }, + { url = "https://files.pythonhosted.org/packages/7c/74/3498563a2c619e8a3ebb4d75457486c249b19b5b04a30600dfd9af06bea5/scipy-1.17.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5fb10d17e649e1446410895639f3385fd2bf4c3c7dfc9bea937bddcbc3d7b9ba", size = 32829770, upload-time = "2026-01-10T21:30:16.359Z" }, + { url = "https://files.pythonhosted.org/packages/48/d1/7b50cedd8c6c9d6f706b4b36fa8544d829c712a75e370f763b318e9638c1/scipy-1.17.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8547e7c57f932e7354a2319fab613981cde910631979f74c9b542bb167a8b9db", size = 35051093, upload-time = "2026-01-10T21:30:22.987Z" }, + { url = "https://files.pythonhosted.org/packages/e2/82/a2d684dfddb87ba1b3ea325df7c3293496ee9accb3a19abe9429bce94755/scipy-1.17.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33af70d040e8af9d5e7a38b5ed3b772adddd281e3062ff23fec49e49681c38cf", size = 34909905, upload-time = "2026-01-10T21:30:28.704Z" }, + { url = "https://files.pythonhosted.org/packages/ef/5e/e565bd73991d42023eb82bb99e51c5b3d9e2c588ca9d4b3e2cc1d3ca62a6/scipy-1.17.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb55bb97d00f8b7ab95cb64f873eb0bf54d9446264d9f3609130381233483f", size = 37457743, upload-time = "2026-01-10T21:30:34.819Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/a8/a66a75c3d8f1fb2b83f66007d6455a06a6f6cf5618c3dc35bc9b69dd096e/scipy-1.17.0-cp314-cp314t-win_amd64.whl", hash = "sha256:1ff269abf702f6c7e67a4b7aad981d42871a11b9dd83c58d2d2ea624efbd1088", size = 37098574, upload-time = "2026-01-10T21:30:40.782Z" }, + { url = "https://files.pythonhosted.org/packages/56/a5/df8f46ef7da168f1bc52cd86e09a9de5c6f19cc1da04454d51b7d4f43408/scipy-1.17.0-cp314-cp314t-win_arm64.whl", hash = "sha256:031121914e295d9791319a1875444d55079885bbae5bdc9c5e0f2ee5f09d34ff", size = 25246266, upload-time = "2026-01-10T21:30:45.923Z" }, ] [[package]] @@ -5928,7 +5930,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "matplotlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pandas", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/86/59/a451d7420a77ab0b98f7affa3a1d78a313d2f7281a57afb1a34bae8ab412/seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7", size = 1457696, upload-time = "2024-01-25T13:21:52.551Z" } @@ -6422,51 +6424,56 @@ wheels = [ [[package]] name = "tomli" -version = "2.3.0" +version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, - { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, - { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, - { url = 
"https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, - { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, - { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, - { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, - { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, - { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, - { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, - { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, - { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, - { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, - { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, - { url = 
"https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, - { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, - { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, - { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, - { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, - { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, - { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, - { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, - { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, - { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, - { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, - { url = 
"https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, - { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, - { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, - { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, - { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, - { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, - { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, - { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, - { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, - { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, - { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, - { 
url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, - { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, - { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, - { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, - { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" }, + { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" }, + { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" }, + { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" }, + { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" }, + { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" }, + { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" }, + { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" }, + { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" }, + { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" }, + { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" }, + { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" }, + { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" }, + { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" }, + { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" }, + { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" }, + { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" }, + { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" }, + { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" }, + { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" }, + { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" }, + { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" }, + { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" }, + { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" }, + { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" }, + { url = 
"https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" }, + { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" }, + { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" }, + { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" }, + { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] [[package]] @@ -6586,28 +6593,28 @@ wheels = [ [[package]] name = "uv" -version = "0.9.22" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a4/e1/298ce517fa2dbe54e0ec8e126c22fe66adedc3aaaba3e5d3c056d62a4d64/uv-0.9.22.tar.gz", hash = "sha256:41c73a4938818ede30e601cd0be87953e5c6a83dc4762e04e626f2eb9b240ebe", size = 3835119, upload-time = "2026-01-06T10:49:35.192Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/53/09/37811eeabacd13c7fe9b6604d967e417124794d42b45d8469d2f421edc10/uv-0.9.22-py3-none-linux_armv6l.whl", hash = "sha256:1f979c9d313b4616d9865859ef520bea5df0d4f15c57214589f5676fafa440c1", size = 21319484, upload-time = "2026-01-06T10:49:42.435Z" }, - { url = "https://files.pythonhosted.org/packages/5e/68/bb76c97c284ce7fb8efa868994c2510588faa7075e60d8865d1373e54b7b/uv-0.9.22-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b78f2605d65c4925631d891dec99b677b05f50c774dedc6ef8968039a5bcfdb0", size = 20446647, upload-time = "2026-01-06T10:49:13.942Z" }, 
- { url = "https://files.pythonhosted.org/packages/af/49/7230b1d56aeaee0eefd346a70f582463f11fb7036d2d020bcf68053bd994/uv-0.9.22-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2a4155cf7d0231d0adae94257ee10d70c57c2f592207536ddd55d924590a8c15", size = 18967861, upload-time = "2026-01-06T10:49:26.026Z" }, - { url = "https://files.pythonhosted.org/packages/70/cf/7b33e791c0cb63587bb1f03f067764fc681c0d1693a6b9a2670ef2f8a4e9/uv-0.9.22-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:0d8f007616cac5962620252b56a1d8224e9b2de566e78558efe04cc18526d507", size = 20807382, upload-time = "2026-01-06T10:49:28.37Z" }, - { url = "https://files.pythonhosted.org/packages/e3/72/5486eab5344a30257544b42dd15e85d5de2ff7fab952a7a6e21cc946efae/uv-0.9.22-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3b2bcce464186f8fafa3bf2aa5d82db4e3229366345399cc3f5bcafd616b8fe0", size = 20914561, upload-time = "2026-01-06T10:49:08.996Z" }, - { url = "https://files.pythonhosted.org/packages/90/67/974adc8fd1baace83efaa2409dd19e60accfbca25c473ed9af8e8188484d/uv-0.9.22-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3422b093b8e6e8de31261133b420c34dbef81f3fd1d82f787ac771b00b54adf8", size = 21996463, upload-time = "2026-01-06T10:49:16.587Z" }, - { url = "https://files.pythonhosted.org/packages/99/7a/ef06158af9141e3b526bcb84ecd84fd1eed7eabf64bc830f302796af8646/uv-0.9.22-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b807bafe6b65fc1fe9c65ffd0d4228db894872de96e7200c44943f24beb68931", size = 23547447, upload-time = "2026-01-06T10:49:44.84Z" }, - { url = "https://files.pythonhosted.org/packages/e4/d7/b7f389311777403ea5230eb816b2aca159980cb8a3de5b9adb53cf19aa2e/uv-0.9.22-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:369b55341c6236f42d8fc335876308e5c57c921850975b3019cc9f7ebbe31567", size = 23159198, upload-time = "2026-01-06T10:49:39.938Z" }, - { url = "https://files.pythonhosted.org/packages/e5/cc/64514ba1102f24cbcb6eed39b22fe6fd04297ce1068552ae3c5fae63725b/uv-0.9.22-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cdc653fb601aa7f273242823fa93024f5fd319c66cdf22f36d784858493564c", size = 22147053, upload-time = "2026-01-06T10:49:19.094Z" }, - { url = "https://files.pythonhosted.org/packages/40/15/63fb7a6908db2f03716c4a50aea7e27a7440fe6a09854282c401139afaf7/uv-0.9.22-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f45e1e0f26dd47fa01eb421c54cfd39de10fd52ac0a9d7ae45b92fce7d92b0b", size = 22225812, upload-time = "2026-01-06T10:49:48.109Z" }, - { url = "https://files.pythonhosted.org/packages/ea/fe/def406c118ac215f4c392b077fb75303d125310cf178774a728e5759d9eb/uv-0.9.22-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:8f73043ade8ff6335e19fe1f4e7425d5e28aec9cafd72d13d5b40bb1cbb85690", size = 20933749, upload-time = "2026-01-06T10:49:23.817Z" }, - { url = "https://files.pythonhosted.org/packages/7f/45/27464cf8697f31858084c9c3f716b3c39d3f9a2a8e30c8a58dbd1a519e24/uv-0.9.22-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:59c4f6b3659a68c26c50865432a7134386f607432160aad51e2247f862902697", size = 22080639, upload-time = "2026-01-06T10:49:21.521Z" }, - { url = "https://files.pythonhosted.org/packages/1c/9a/7008f6aad89442ef00735b4f4c8d86eaaeaa7392f4dec814469d212eb462/uv-0.9.22-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:77ec4c101d41d7738226466191a7d62f9fa4de06ea580e0801da2f5cd5fa08aa", size = 20894483, upload-time = "2026-01-06T10:49:37.201Z" }, - { 
url = "https://files.pythonhosted.org/packages/fe/97/74b4c782d410e5f891446d1d27107b0312fae0e83c7be2edf6867c408f81/uv-0.9.22-py3-none-musllinux_1_1_i686.whl", hash = "sha256:b1985559b38663642658069e8d09fa6c30ed1c67654b7e5765240d9e4e9cdd57", size = 21453244, upload-time = "2026-01-06T10:49:52.878Z" }, - { url = "https://files.pythonhosted.org/packages/01/17/b3055b9f82f87a727ed5f745aadda9c363d2e2dd180f99350431e98c0ad4/uv-0.9.22-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:e4b61a9c8b8dcbf64e642d2052342d36a46886b8bc3ccc407282962b970101af", size = 22446527, upload-time = "2026-01-06T10:49:50.59Z" }, - { url = "https://files.pythonhosted.org/packages/2d/39/03ed466a5afb8bfc104096bb577b26ed7e413177fee699c756916ab35ef5/uv-0.9.22-py3-none-win32.whl", hash = "sha256:d9d4be990bb92a68781f7c98d2321b528667b61d565c02ba978488c0210aa768", size = 20088632, upload-time = "2026-01-06T10:49:11.725Z" }, - { url = "https://files.pythonhosted.org/packages/32/49/9e3e19ba756c4a5e6acb4ea74336d3035f7959254fbb05f5eb77bff067ed/uv-0.9.22-py3-none-win_amd64.whl", hash = "sha256:9c238525272506845fe07c0b9088c5e33fcd738e1f49ef49dc3c8112096d2e3a", size = 22160389, upload-time = "2026-01-06T10:49:32.826Z" }, - { url = "https://files.pythonhosted.org/packages/79/78/4a10b718290eb6b9ab436286420c430f9ad7afa0c1b03c43692d6289fe2f/uv-0.9.22-py3-none-win_arm64.whl", hash = "sha256:012bdc5285a9cdb091ac514b7eb8a707e3b649af5355fe4afb4920bfe1958c00", size = 20556116, upload-time = "2026-01-06T10:49:30.493Z" }, +version = "0.9.24" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/7f/6692596de7775b3059a55539aed2eec16a0642a2d6d3510baa5878287ce4/uv-0.9.24.tar.gz", hash = "sha256:d59d31c25fc530c68db9164174efac511a25fc882cec49cd48f75a18e7ebd6d5", size = 3852673, upload-time = "2026-01-09T22:34:31.635Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b8/51/10bb9541c40a5b4672527c357997a30fdf38b75e7bbaad0c37ed70889efa/uv-0.9.24-py3-none-linux_armv6l.whl", hash = "sha256:75a000f529ec92235b10fb5e16ca41f23f46c643308fd6c5b0d7b73ca056c5b9", size = 21395664, upload-time = "2026-01-09T22:34:05.887Z" }, + { url = "https://files.pythonhosted.org/packages/ec/dd/d7df524cb764ebc652e0c8bf9abe55fc34391adc2e4ab1d47375222b38a9/uv-0.9.24-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:207c8a2d4c4d55589feb63b4be74f6ff6ab92fa81b14a6515007ccec5a868ae0", size = 20547988, upload-time = "2026-01-09T22:34:16.21Z" }, + { url = "https://files.pythonhosted.org/packages/49/e4/7ca5e7eaed4b2b9d407aa5aeeb8f71cace7db77f30a63139bbbfdfe4770c/uv-0.9.24-py3-none-macosx_11_0_arm64.whl", hash = "sha256:44c0b8a78724e4cfa8e9c0266023c70fc792d0b39a5da17f5f847af2b530796b", size = 19033208, upload-time = "2026-01-09T22:33:50.91Z" }, + { url = "https://files.pythonhosted.org/packages/27/05/b7bab99541056537747bfdc55fdc97a4ba998e2b53cf855411ef176c412b/uv-0.9.24-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:841ede01d6dcf1676a21dce05f3647ba171c1d92768a03e8b8b6b7354b34a6d2", size = 20872212, upload-time = "2026-01-09T22:33:58.007Z" }, + { url = "https://files.pythonhosted.org/packages/d3/93/3a69cf481175766ee6018afb281666de12ccc04367d20a41dc070be8b422/uv-0.9.24-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:69531d9a8772afb2dff68fef2469f666e4f8a0132b2109e36541c423415835da", size = 21017966, upload-time = "2026-01-09T22:34:29.354Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/40/7aec2d428e57a3ec992efc49bbc71e4a0ceece5a726751c661ddc3f41315/uv-0.9.24-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6720c9939cca7daff3cccc35dd896bbe139d7d463c62cba8dbbc474ff8eb93d1", size = 21943358, upload-time = "2026-01-09T22:34:08.63Z" }, + { url = "https://files.pythonhosted.org/packages/c8/f4/2aa5b275aa8e5edb659036e94bae13ae294377384cf2a93a8d742a38050f/uv-0.9.24-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d7d1333d9c21088c89cb284ef29fdf48dc2015fe993174a823a3e7c991db90f9", size = 23672949, upload-time = "2026-01-09T22:34:03.113Z" }, + { url = "https://files.pythonhosted.org/packages/8e/24/2589bed4b39394c799472f841e0580318a8b7e69ef103a0ab50cf1c39dff/uv-0.9.24-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b610d89d6025000d08cd9bd458c6e264003a0ecfdaa8e4eba28938130cd1837", size = 23270210, upload-time = "2026-01-09T22:34:13.94Z" }, + { url = "https://files.pythonhosted.org/packages/80/3a/034494492a1ad1f95371c6fd735e4b7d180b8c1712c88b0f32a34d6352fd/uv-0.9.24-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:38c59e18fe5fa42f7baeb4f08c94914cee6d87ff8faa6fc95c994dbc0de26c90", size = 22282247, upload-time = "2026-01-09T22:33:53.362Z" }, + { url = "https://files.pythonhosted.org/packages/be/0e/d8ab2c4fa6c9410a8a37fa6608d460b0126cee2efed9eecf516cdec72a1a/uv-0.9.24-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009cc82cdfc48add6ec13a0c4ffbb788ae2cab53573b4218069ca626721a404b", size = 22348801, upload-time = "2026-01-09T22:34:00.46Z" }, + { url = "https://files.pythonhosted.org/packages/50/fa/7217764e4936d6fda1944d956452bf94f790ae8a02cb3e5aa496d23fcb25/uv-0.9.24-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:1914d33e526167dc202ec4a59119c68467b31f7c71dcf8b1077571d091ca3e7c", size = 21000825, upload-time = "2026-01-09T22:34:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/94/8f/533db58a36895142b0c11eedf8bfe11c4724fb37deaa417bfb0c689d40b8/uv-0.9.24-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:aafe7dd9b633672054cf27f1a8e4127506324631f1af5edd051728f4f8085351", size = 22149066, upload-time = "2026-01-09T22:33:45.722Z" }, + { url = "https://files.pythonhosted.org/packages/cf/c7/e6eccd96341a548f0405bffdf55e7f30b5c0757cd1b8f7578e0972a66002/uv-0.9.24-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:63a0a46693098cf8446e41bd5d9ce7d3bc9b775a63fe0c8405ab6ee328424d46", size = 20993489, upload-time = "2026-01-09T22:34:27.007Z" }, + { url = "https://files.pythonhosted.org/packages/46/07/32d852d2d40c003b52601c44202c9d9e655c485fae5d84e42f326814b0be/uv-0.9.24-py3-none-musllinux_1_1_i686.whl", hash = "sha256:15d3955bfb03a7b78aaf5afb639cedefdf0fc35ff844c92e3fe6e8700b94b84f", size = 21400775, upload-time = "2026-01-09T22:34:24.278Z" }, + { url = "https://files.pythonhosted.org/packages/b0/58/f8e94226126011ba2e2e9d59c6190dc7fe9e61fa7ef4ca720d7226c1482b/uv-0.9.24-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:488a07e59fb417bf86de5630197223b7a0223229e626afc124c26827db78cff8", size = 22554194, upload-time = "2026-01-09T22:34:18.504Z" }, + { url = "https://files.pythonhosted.org/packages/da/8e/b540c304039a6561ba8e9a673009cfe1451f989d2269fe40690901ddb233/uv-0.9.24-py3-none-win32.whl", hash = "sha256:68a3186074c03876ee06b68730d5ff69a430296760d917ebbbb8e3fb54fb4091", size = 20203184, upload-time = "2026-01-09T22:34:11.02Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/59/dba7c5feec1f694183578435eaae0d759b8c459c5e4f91237a166841a116/uv-0.9.24-py3-none-win_amd64.whl", hash = "sha256:8cd626306b415491f839b1a9100da6795c82c44d4cf278dd7ace7a774af89df4", size = 22294050, upload-time = "2026-01-09T22:33:48.228Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ef/e58fb288bafb5a8b5d4994e73fa6e062e408680e5a20d0427d5f4f66d8b1/uv-0.9.24-py3-none-win_arm64.whl", hash = "sha256:8d3c0fec7aa17f936a5b258816e856647b21f978a81bcfb2dc8caf2892a4965e", size = 20620004, upload-time = "2026-01-09T22:33:55.62Z" }, ] [[package]] @@ -6658,7 +6665,7 @@ wheels = [ [[package]] name = "virtualenv" -version = "20.36.0" +version = "20.36.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -6666,9 +6673,9 @@ dependencies = [ { name = "platformdirs", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/78/49/87e23d8f742f10f965bce5d6b285fc88a4f436b11daf6b6225d4d66f8492/virtualenv-20.36.0.tar.gz", hash = "sha256:a3601f540b515a7983508113f14e78993841adc3d83710fa70f0ac50f43b23ed", size = 6032237, upload-time = "2026-01-07T17:20:04.975Z" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/eb/6a/0af36875e0023a1f2d0b66b4051721fc26740e947696922df1665b75e5d3/virtualenv-20.36.0-py3-none-any.whl", hash = "sha256:e7ded577f3af534fd0886d4ca03277f5542053bedb98a70a989d3c22cfa5c9ac", size = 6008261, upload-time = "2026-01-07T17:20:02.87Z" }, + { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, ] [[package]]