From 0fa7517d2e7af0091232ef3877e7d6efb007a4e5 Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Mon, 12 May 2025 23:22:44 +0000 Subject: [PATCH] CodeGen from PR 34618 in Azure/azure-rest-api-specs Merge c2069a48a8b46228c9630469726b36133eed3947 into e70895d3c077412514b402fb3030235dc0de5a5d --- sdk/ai/azure-ai-agents/_meta.json | 6 + .../azure-ai-agents/apiview-properties.json | 12 +- sdk/ai/azure-ai-agents/azure/ai/__init__.py | 1 - .../azure-ai-agents/azure/ai/agents/_patch.py | 986 +------- .../azure-ai-agents/azure/ai/agents/_types.py | 1 - .../azure/ai/agents/_utils/model_base.py | 2 +- .../azure/ai/agents/aio/_patch.py | 986 +------- .../ai/agents/aio/operations/_operations.py | 600 +++-- .../azure/ai/agents/aio/operations/_patch.py | 2229 +---------------- .../azure/ai/agents/models/__init__.py | 20 - .../azure/ai/agents/models/_enums.py | 6 - .../azure/ai/agents/models/_models.py | 411 +-- .../azure/ai/agents/models/_patch.py | 1825 +------------- .../azure/ai/agents/operations/_operations.py | 638 ++--- .../azure/ai/agents/operations/_patch.py | 2227 +--------------- .../azure/ai/agents/telemetry/__init__.py | 13 - .../telemetry/_ai_agents_instrumentor.py | 2168 ---------------- .../telemetry/_instrument_paged_wrappers.py | 142 -- .../ai/agents/telemetry/_trace_function.py | 204 -- .../azure/ai/agents/telemetry/_utils.py | 155 -- .../utils/agent_trace_configurator.py | 4 +- ...basics_async_with_azure_monitor_tracing.py | 1 + ...gents_basics_with_azure_monitor_tracing.py | 1 + ...eventhandler_with_azure_monitor_tracing.py | 1 + ...ents_toolset_with_azure_monitor_tracing.py | 3 +- sdk/ai/azure-ai-agents/sdk_packaging.toml | 2 + .../{azure => servicepatterns}/__init__.py | 0 .../servicepatterns/models/__init__.py | 29 + .../servicepatterns/models/_models.py | 82 + .../servicepatterns/models/_patch.py | 21 + sdk/ai/azure-ai-agents/setup.py | 10 - sdk/ai/azure-ai-agents/tsp-location.yaml | 2 +- 32 files changed, 866 insertions(+), 11922 deletions(-) create mode 100644 
sdk/ai/azure-ai-agents/_meta.json delete mode 100644 sdk/ai/azure-ai-agents/azure/ai/__init__.py delete mode 100644 sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/__init__.py delete mode 100644 sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_ai_agents_instrumentor.py delete mode 100644 sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_instrument_paged_wrappers.py delete mode 100644 sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_trace_function.py delete mode 100644 sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_utils.py create mode 100644 sdk/ai/azure-ai-agents/sdk_packaging.toml rename sdk/ai/azure-ai-agents/{azure => servicepatterns}/__init__.py (100%) create mode 100644 sdk/ai/azure-ai-agents/servicepatterns/models/__init__.py create mode 100644 sdk/ai/azure-ai-agents/servicepatterns/models/_models.py create mode 100644 sdk/ai/azure-ai-agents/servicepatterns/models/_patch.py diff --git a/sdk/ai/azure-ai-agents/_meta.json b/sdk/ai/azure-ai-agents/_meta.json new file mode 100644 index 000000000000..4e77dc82d72c --- /dev/null +++ b/sdk/ai/azure-ai-agents/_meta.json @@ -0,0 +1,6 @@ +{ + "commit": "d002441e30d6db280f92fe19b885cc68279c591c", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/ai/Azure.AI.Agents", + "@azure-tools/typespec-python": "0.44.2" +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/apiview-properties.json b/sdk/ai/azure-ai-agents/apiview-properties.json index d16ea226797e..ed7b46974347 100644 --- a/sdk/ai/azure-ai-agents/apiview-properties.json +++ b/sdk/ai/azure-ai-agents/apiview-properties.json @@ -7,6 +7,8 @@ "azure.ai.agents.models.AgentsResponseFormat": "Azure.AI.Agents.AgentsResponseFormat", "azure.ai.agents.models.AgentThread": "Azure.AI.Agents.AgentThread", "azure.ai.agents.models.AgentThreadCreationOptions": "Azure.AI.Agents.AgentThreadCreationOptions", + "servicepatterns.models.AgentV1Error": "ServicePatterns.AgentV1Error", + 
"servicepatterns.models.AgentV1ErrorError": "ServicePatterns.AgentV1Error.error.anonymous", "azure.ai.agents.models.AISearchIndexResource": "Azure.AI.Agents.AISearchIndexResource", "azure.ai.agents.models.AzureAISearchResource": "Azure.AI.Agents.AzureAISearchResource", "azure.ai.agents.models.ToolDefinition": "Azure.AI.Agents.ToolDefinition", @@ -15,9 +17,6 @@ "azure.ai.agents.models.AzureFunctionDefinition": "Azure.AI.Agents.AzureFunctionDefinition", "azure.ai.agents.models.AzureFunctionStorageQueue": "Azure.AI.Agents.AzureFunctionStorageQueue", "azure.ai.agents.models.AzureFunctionToolDefinition": "Azure.AI.Agents.AzureFunctionToolDefinition", - "azure.ai.agents.models.BingCustomSearchConfiguration": "Azure.AI.Agents.BingCustomSearchConfiguration", - "azure.ai.agents.models.BingCustomSearchConfigurationList": "Azure.AI.Agents.BingCustomSearchConfigurationList", - "azure.ai.agents.models.BingCustomSearchToolDefinition": "Azure.AI.Agents.BingCustomSearchToolDefinition", "azure.ai.agents.models.BingGroundingSearchConfiguration": "Azure.AI.Agents.BingGroundingSearchConfiguration", "azure.ai.agents.models.BingGroundingSearchConfigurationList": "Azure.AI.Agents.BingGroundingSearchConfigurationList", "azure.ai.agents.models.BingGroundingToolDefinition": "Azure.AI.Agents.BingGroundingToolDefinition", @@ -71,7 +70,6 @@ "azure.ai.agents.models.MessageTextFilePathDetails": "Azure.AI.Agents.MessageTextFilePathDetails", "azure.ai.agents.models.MessageTextUrlCitationAnnotation": "Azure.AI.Agents.MessageTextUrlCitationAnnotation", "azure.ai.agents.models.MessageTextUrlCitationDetails": "Azure.AI.Agents.MessageTextUrlCitationDetails", - "azure.ai.agents.models.MicrosoftFabricToolDefinition": "Azure.AI.Agents.MicrosoftFabricToolDefinition", "azure.ai.agents.models.OpenApiAuthDetails": "Azure.AI.Agents.OpenApiAuthDetails", "azure.ai.agents.models.OpenApiAnonymousAuthDetails": "Azure.AI.Agents.OpenApiAnonymousAuthDetails", "azure.ai.agents.models.OpenApiConnectionAuthDetails": 
"Azure.AI.Agents.OpenApiConnectionAuthDetails", @@ -91,7 +89,6 @@ "azure.ai.agents.models.RunStep": "Azure.AI.Agents.RunStep", "azure.ai.agents.models.RunStepToolCall": "Azure.AI.Agents.RunStepToolCall", "azure.ai.agents.models.RunStepAzureAISearchToolCall": "Azure.AI.Agents.RunStepAzureAISearchToolCall", - "azure.ai.agents.models.RunStepBingCustomSearchToolCall": "Azure.AI.Agents.RunStepBingCustomSearchToolCall", "azure.ai.agents.models.RunStepBingGroundingToolCall": "Azure.AI.Agents.RunStepBingGroundingToolCall", "azure.ai.agents.models.RunStepCodeInterpreterToolCallOutput": "Azure.AI.Agents.RunStepCodeInterpreterToolCallOutput", "azure.ai.agents.models.RunStepCodeInterpreterImageOutput": "Azure.AI.Agents.RunStepCodeInterpreterImageOutput", @@ -125,19 +122,14 @@ "azure.ai.agents.models.RunStepFunctionToolCallDetails": "Azure.AI.Agents.RunStepFunctionToolCallDetails", "azure.ai.agents.models.RunStepMessageCreationDetails": "Azure.AI.Agents.RunStepMessageCreationDetails", "azure.ai.agents.models.RunStepMessageCreationReference": "Azure.AI.Agents.RunStepMessageCreationReference", - "azure.ai.agents.models.RunStepMicrosoftFabricToolCall": "Azure.AI.Agents.RunStepMicrosoftFabricToolCall", "azure.ai.agents.models.RunStepOpenAPIToolCall": "Azure.AI.Agents.RunStepOpenAPIToolCall", - "azure.ai.agents.models.RunStepSharepointToolCall": "Azure.AI.Agents.RunStepSharepointToolCall", "azure.ai.agents.models.RunStepToolCallDetails": "Azure.AI.Agents.RunStepToolCallDetails", - "azure.ai.agents.models.SharepointToolDefinition": "Azure.AI.Agents.SharepointToolDefinition", "azure.ai.agents.models.SubmitToolOutputsAction": "Azure.AI.Agents.SubmitToolOutputsAction", "azure.ai.agents.models.SubmitToolOutputsDetails": "Azure.AI.Agents.SubmitToolOutputsDetails", "azure.ai.agents.models.ThreadDeletionStatus": "Azure.AI.Agents.ThreadDeletionStatus", "azure.ai.agents.models.ThreadMessage": "Azure.AI.Agents.ThreadMessage", "azure.ai.agents.models.ThreadMessageOptions": 
"Azure.AI.Agents.ThreadMessageOptions", "azure.ai.agents.models.ThreadRun": "Azure.AI.Agents.ThreadRun", - "azure.ai.agents.models.ToolConnection": "Azure.AI.Agents.ToolConnection", - "azure.ai.agents.models.ToolConnectionList": "Azure.AI.Agents.ToolConnectionList", "azure.ai.agents.models.ToolOutput": "Azure.AI.Agents.ToolOutput", "azure.ai.agents.models.ToolResources": "Azure.AI.Agents.ToolResources", "azure.ai.agents.models.TruncationObject": "Azure.AI.Agents.TruncationObject", diff --git a/sdk/ai/azure-ai-agents/azure/ai/__init__.py b/sdk/ai/azure-ai-agents/azure/ai/__init__.py deleted file mode 100644 index d55ccad1f573..000000000000 --- a/sdk/ai/azure-ai-agents/azure/ai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/_patch.py b/sdk/ai/azure-ai-agents/azure/ai/agents/_patch.py index 0350c59656f3..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/_patch.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/_patch.py @@ -1,987 +1,15 @@ -# pylint: disable=line-too-long,useless-suppression,too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import io -import logging -import sys -import os -import time -from typing import ( - IO, - Any, - Dict, - List, - Optional, - Union, - Callable, - Set, - overload, -) +from typing import List -from azure.core.credentials import TokenCredential -from azure.core.tracing.decorator import distributed_trace - -from . import models as _models -from ._client import AgentsClient as AgentsClientGenerated -from .operations._patch import _has_errors_in_toolcalls_output -from . import _types - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - -logger = logging.getLogger(__name__) - - -class AgentsClient(AgentsClientGenerated): # pylint: disable=client-accepts-api-version-keyword - - def __init__(self, endpoint: str, credential: TokenCredential, **kwargs: Any) -> None: - if not endpoint: - raise ValueError("Please provide the 1DP endpoint.") - # TODO: Remove this custom code when 1DP service will be available - parts = endpoint.split(";") - # Detect legacy endpoint and build it in old way only in tests. - if os.environ.get("AZURE_AI_AGENTS_TESTS_IS_TEST_RUN") == "True" and len(parts) == 4: - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - endpoint = ( - f"{endpoint}/agents/v1.0/subscriptions" - f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" - f"/Microsoft.MachineLearningServices/workspaces/{project_name}" - ) - # Override the credential scope with the legacy one. - kwargs["credential_scopes"] = ["https://management.azure.com/.default"] - kwargs["api_version"] = "2025_05_01" - # End of legacy endpoints handling. 
- super().__init__(endpoint, credential, **kwargs) - - # Create and store your function tool + retry limit on the client instance. - self._function_tool = _models.FunctionTool(set()) - self._function_tool_max_retry = 10 - - # Inject them into the RunsOperations instance so that run operations can use them. - self.runs._function_tool = self._function_tool - self.runs._function_tool_max_retry = self._function_tool_max_retry - - # pylint: disable=arguments-differ - @overload - def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. 
For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.agents.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. 
- So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :type body: Union[JSON, IO[bytes]] - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. - :paramtype name: Optional[str] - :keyword description: A description for the new agent. - :paramtype description: Optional[str] - :keyword instructions: System instructions for the agent. - :paramtype instructions: Optional[str] - :keyword tools: List of tools definitions for the agent. - :paramtype tools: Optional[List[_models.ToolDefinition]] - :keyword tool_resources: Resources used by the agent's tools. - :paramtype tool_resources: Optional[_models.ToolResources] - :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :paramtype toolset: Optional[_models.ToolSet] - :keyword temperature: Sampling temperature for generating agent responses. - :paramtype temperature: Optional[float] - :keyword top_p: Nucleus sampling parameter. 
- :paramtype top_p: Optional[float] - :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.AgentsResponseFormatOption"] - :keyword metadata: Key/value pairs for storing additional information. - :paramtype metadata: Optional[Dict[str, str]] - :keyword content_type: Content type of the body. - :paramtype content_type: str - :return: An Agent object. - :rtype: _models.Agent - :raises: HttpResponseError for HTTP errors. - """ - - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return super().create_agent(body=body, content_type=content_type, **kwargs) - return super().create_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - new_agent = super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - return new_agent - - # pylint: disable=arguments-differ - @overload - def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.agents.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. 
- :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. 
- - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_agent( - self, - agent_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. 
- :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.agents.models.ToolResources - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. 
Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return super().update_agent(body=body, content_type=content_type, **kwargs) - return super().update_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - return super().update_agent( - agent_id=agent_id, - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def _validate_tools_and_tool_resources( - self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] - ): - if tool_resources is None: - return - if tools is None: - tools = [] - - if tool_resources.file_search is not None and not any( - isinstance(tool, _models.FileSearchToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a 
FileSearchToolDefinition when tool_resources.file_search is provided" - ) - if tool_resources.code_interpreter is not None and not any( - isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" - ) - - @distributed_trace - def delete_agent( # pylint: disable=delete-operation-wrong-return-type - self, agent_id: str, **kwargs: Any - ) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - return super().delete_agent(agent_id, **kwargs) - - @distributed_trace - def enable_auto_function_calls( # pylint: disable=client-method-missing-kwargs - self, - tools: Union[Set[Callable[..., Any]], _models.FunctionTool, _models.ToolSet], - max_retry: int = 10, - ) -> None: - """Enables tool calls to be executed automatically during runs.create_and_process or runs.stream. - If this is not set, functions must be called manually. - If automatic function calls fail, the agents will receive error messages allowing it to retry with another - function call or figure out the answer with its knowledge. - - :param tools: A function tool, toolset, or a set of callable functions. - :type tools: Union[Set[Callable[..., Any]], _models.AsyncFunctionTool, _models.AsyncToolSet] - :param max_retry: Maximum number of errors allowed and retry per run or stream. Default value is 10. 
- :type max_retry: int - """ - if isinstance(tools, _models.FunctionTool): - self._function_tool = tools - elif isinstance(tools, _models.ToolSet): - tool = tools.get_tool(_models.FunctionTool) - self._function_tool = tool - else: - self._function_tool = _models.FunctionTool(tools) - - self._function_tool_max_retry = max_retry - - # Propagate into the RunsOperations instance - # pylint: disable=protected-access - self.runs._function_tool = self._function_tool - self.runs._function_tool_max_retry = self._function_tool_max_retry - # pylint: enable=protected-access - - @overload - def create_thread_and_run( - self, - *, - agent_id: str, - content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using that new thread. - - :keyword agent_id: The ID of the agent for which the thread should be created. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type for JSON body. Default is "application/json". - :type content_type: str - :keyword thread: The details used to create the new thread. If none provided, an empty thread is - created. 
- :type thread: ~azure.ai.agents.models.AgentThreadCreationOptions - :keyword model: Override the model the agent uses for this run. - :type model: str - :keyword instructions: Override the system instructions for this run. - :type instructions: str - :keyword tools: Override the list of enabled tools for this run. - :type tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. - :type tool_resources: ~azure.ai.agents.models.UpdateToolResourcesOptions - :keyword temperature: Sampling temperature between 0 and 2. Higher = more random. - :type temperature: float - :keyword top_p: Nucleus sampling parameter between 0 and 1. - :type top_p: float - :keyword max_prompt_tokens: Max prompt tokens to use across the run. - :type max_prompt_tokens: int - :keyword max_completion_tokens: Max completion tokens to use across the run. - :type max_completion_tokens: int - :keyword truncation_strategy: Strategy for dropping old messages as context grows. - :type truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls which tool the model will call. - :type tool_choice: str or - ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format the model must output. - :type response_format: str or - ~azure.ai.agents.models.AgentsApiResponseFormatMode or - ~azure.ai.agents.models.AgentsApiResponseFormat or - ~azure.ai.agents.models.ResponseFormatJsonSchemaType - :keyword parallel_tool_calls: If True, tools will be invoked in parallel. - :type parallel_tool_calls: bool - :keyword metadata: Up to 16 key/value pairs for structured metadata on the run. - :type metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. 
- :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using a JSON body. - - :param body: The request payload as a JSON-serializable dict. - :type body: JSON - :keyword content_type: Body Parameter content-type for JSON body. Default is "application/json". - :type content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread_and_run( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using a binary body. - - :param body: The request payload as a byte-stream. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type for binary body. Default is "application/json". - :type content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. 
- :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_thread_and_run( # type: ignore - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional[_types.AgentsToolChoiceOption] = None, - response_format: Optional[_types.AgentsResponseFormatOption] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using the specified parameters. - - :param body: Either a JSON payload (dict) or a binary stream (IO[bytes]). Use JSON overload for - dict bodies and binary overload for IO[bytes]. - :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent for which the thread should be created. - Required when not using the JSON/body overload. - :type agent_id: str - :keyword thread: The details used to create the new thread. If none provided, an empty thread is - created. - :type thread: ~azure.ai.agents.models.AgentThreadCreationOptions - :keyword model: Override the model the agent uses for this run. - :type model: str - :keyword instructions: Override the system instructions for this run. - :type instructions: str - :keyword tools: Override the list of enabled tools for this run. - :type tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. 
- :type tool_resources: ~azure.ai.agents.models.UpdateToolResourcesOptions - :keyword temperature: Sampling temperature between 0 and 2. Higher = more random. - :type temperature: float - :keyword top_p: Nucleus sampling parameter between 0 and 1. - :type top_p: float - :keyword max_prompt_tokens: Max prompt tokens to use across the run. - :type max_prompt_tokens: int - :keyword max_completion_tokens: Max completion tokens to use across the run. - :type max_completion_tokens: int - :keyword truncation_strategy: Strategy for dropping old messages as context grows. - :type truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls which tool the model will call. - :type tool_choice: str or - ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format the model must output. - :type response_format: str or - ~azure.ai.agents.models.AgentsApiResponseFormatMode or - ~azure.ai.agents.models.AgentsApiResponseFormat or - ~azure.ai.agents.models.ResponseFormatJsonSchemaType - :keyword parallel_tool_calls: If True, tools will be invoked in parallel. - :type parallel_tool_calls: bool - :keyword metadata: Up to 16 key/value pairs for structured metadata on the run. - :type metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ValueError: If the combination of arguments is invalid. 
- :raises ~azure.core.exceptions.HttpResponseError: - """ - # JSON‐body overload - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - return super().create_thread_and_run(body, content_type=content_type, **kwargs) # JSON payload - - # Binary‐body overload - if isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - return super().create_thread_and_run(body, content_type=content_type, **kwargs) # binary stream - - # Keyword‐only overload - if agent_id is not _Unset: - return super().create_thread_and_run( - agent_id=agent_id, - thread=thread, - model=model, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - stream_parameter=False, # force non‐streaming - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - # Nothing matched - raise ValueError( - "Invalid arguments for create_thread_and_run(). " - "Provide either a JSON dict, a binary IO[bytes], or keyword parameters including 'agent_id'." 
- ) - - @distributed_trace - def create_thread_and_process_run( - self, - *, - agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """ - Creates a new agent thread and run in one call, then polls until the run enters a terminal - state, executing any required tool calls via the provided ToolSet. - - :keyword agent_id: The unique identifier of the agent to run. Required if `body` is unset. - :type agent_id: str - :keyword thread: Options for creating the new thread (initial messages, metadata, tool resources). - :type thread: ~azure.ai.agents.models.AgentThreadCreationOptions - :keyword model: Optional override of the model deployment name to use for this run. - :type model: str, optional - :keyword instructions: Optional override of the system instructions for this run. - :type instructions: str, optional - :keyword toolset: A ToolSet instance containing both `.definitions` and `.resources` for tools. - If provided, its definitions/resources are used; otherwise no tools are passed. - :type toolset: azure.ai.agents._tools.ToolSet, optional - :keyword temperature: Sampling temperature for the model (0.0-2.0), higher is more random. - :type temperature: float, optional - :keyword top_p: Nucleus sampling value (0.0-1.0), alternative to temperature. 
- :type top_p: float, optional - :keyword max_prompt_tokens: Maximum total prompt tokens across turns; run ends “incomplete” if exceeded. - :type max_prompt_tokens: int, optional - :keyword max_completion_tokens: Maximum total completion tokens across turns; run ends “incomplete” if exceeded. - :type max_completion_tokens: int, optional - :keyword truncation_strategy: Strategy for dropping old messages when context window overflows. - :type truncation_strategy: ~azure.ai.agents.models.TruncationObject, optional - :keyword tool_choice: Controls which tool (if any) the model is allowed to call. - :type tool_choice: str or ~azure.ai.agents.models.AgentsToolChoiceOption, optional - :keyword response_format: Specifies the required format for the model’s output. - :type response_format: str or ~azure.ai.agents.models.AgentsResponseFormatOption, optional - :keyword parallel_tool_calls: If True, allows tool calls to be executed in parallel. - :type parallel_tool_calls: bool, optional - :keyword metadata: Optional metadata (up to 16 key/value pairs) to attach to the run. - :type metadata: dict[str, str], optional - :keyword polling_interval: Seconds to wait between polling attempts for run status. Default is 1. - :type polling_interval: int, optional - :return: The final ThreadRun object, in a terminal state (succeeded, failed, or cancelled). - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - If the underlying REST call to create the thread+run or to poll fails. 
- """ - tools = toolset.definitions if toolset else None - tool_resources = toolset.resources if toolset else None - - run = self.create_thread_and_run( - agent_id=agent_id, - thread=thread, - model=model, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - current_retry = 0 - # keep polling until we leave a “running” or “queued” or “requires_action” state - while run.status in ( - _models.RunStatus.QUEUED, - _models.RunStatus.IN_PROGRESS, - _models.RunStatus.REQUIRES_ACTION, - ): - time.sleep(polling_interval) - run = self.runs.get(thread_id=run.thread_id, run_id=run.id) - - # If the model requests tool calls, execute and submit them - if run.status == _models.RunStatus.REQUIRES_ACTION and isinstance( - run.required_action, _models.SubmitToolOutputsAction - ): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.warning("No tool calls provided - cancelling run") - self.runs.cancel(thread_id=run.thread_id, run_id=run.id) - break - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = _models.ToolSet() - toolset.add(self._function_tool) - tool_outputs = toolset.execute_tool_calls(tool_calls) - - if _has_errors_in_toolcalls_output(tool_outputs): - if current_retry >= self._function_tool_max_retry: # pylint:disable=no-else-return - logger.warning( - "Tool outputs contain errors - reaching max retry %s", self._function_tool_max_retry - ) - return self.runs.cancel(thread_id=run.thread_id, run_id=run.id) - else: - logger.warning("Tool outputs contain errors - retrying") - current_retry += 1 - - logger.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - run2 = self.runs.submit_tool_outputs( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs - ) - logger.debug("Tool outputs submitted to run: %s", run2.id) - - logger.debug("Current run ID: %s with status: %s", run.id, run.status) - - return run - - -__all__: List[str] = ["AgentsClient"] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/_types.py b/sdk/ai/azure-ai-agents/azure/ai/agents/_types.py index 9b16b6083887..8ff24fb65ae5 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/_types.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/_types.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py b/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py index aaa6692b2346..49d5c7259389 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression,too-many-lines +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/_patch.py b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/_patch.py index b52f8e076a22..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/_patch.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/_patch.py @@ -1,987 +1,15 @@ -# pylint: disable=line-too-long,useless-suppression,too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio # pylint: disable = do-not-import-asyncio -import io -import logging -import os +from typing import List -from typing import ( - IO, - TYPE_CHECKING, - Any, - Dict, - List, - MutableMapping, - Optional, - Union, - Callable, - Set, - overload, -) -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.tracing.decorator import distributed_trace - -from .. 
import models as _models -from ._client import AgentsClient as AgentsClientGenerated -from .operations._patch import _has_errors_in_toolcalls_output -from .. import _types - -if TYPE_CHECKING: - - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - -logger = logging.getLogger(__name__) - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - - -class AgentsClient(AgentsClientGenerated): # pylint: disable=client-accepts-api-version-keyword - - def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: - if not endpoint: - raise ValueError("Please provide the 1DP endpoint.") - # TODO: Remove this custom code when 1DP service will be available - parts = endpoint.split(";") - # Detect legacy endpoint and build it in old way only in tests. - if os.environ.get("AZURE_AI_AGENTS_TESTS_IS_TEST_RUN") == "True" and len(parts) == 4: - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - endpoint = ( - f"{endpoint}/agents/v1.0/subscriptions" - f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" - f"/Microsoft.MachineLearningServices/workspaces/{project_name}" - ) - # Override the credential scope with the legacy one. - kwargs["credential_scopes"] = ["https://management.azure.com/.default"] - kwargs["api_version"] = "2025_05_01" - # End of legacy endpoints handling. - super().__init__(endpoint, credential, **kwargs) - - # Create and store your function tool + retry limit on the client instance. - self._function_tool = _models.AsyncFunctionTool(set()) - self._function_tool_max_retry = 10 - - # Inject them into the RunsOperations instance so that run operations can use them. 
- self.runs._function_tool = self._function_tool - self.runs._function_tool_max_retry = self._function_tool_max_retry - - # pylint: disable=arguments-differ - @overload - async def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.agents.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.agentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: agent. 
The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - async def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. 
- So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - agentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.agentsApiResponseFormatMode - or ~azure.ai.agents.models.agentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: agent. The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: agent. The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: agent. 
The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :type body: Union[JSON, IO[bytes]] - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. - :paramtype name: Optional[str] - :keyword description: A description for the new agent. - :paramtype description: Optional[str] - :keyword instructions: System instructions for the agent. - :paramtype instructions: Optional[str] - :keyword tools: List of tools definitions for the agent. - :paramtype tools: Optional[List[_models.ToolDefinition]] - :keyword tool_resources: Resources used by the agent's tools. - :paramtype tool_resources: Optional[_models.ToolResources] - :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :paramtype toolset: Optional[_models.AsyncToolSet] - :keyword temperature: Sampling temperature for generating agent responses. 
- :paramtype temperature: Optional[float] - :keyword top_p: Nucleus sampling parameter. - :paramtype top_p: Optional[float] - :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.AgentsResponseFormatOption"] - :keyword metadata: Key/value pairs for storing additional information. - :paramtype metadata: Optional[Dict[str, str]] - :keyword content_type: Content type of the body. - :paramtype content_type: str - :return: An agent object. - :rtype: _models.Agent - :raises: HttpResponseError for HTTP errors. - """ - if body is not _Unset: - if isinstance(body, io.IOBase): - return await super().create_agent(body=body, content_type=content_type, **kwargs) - return await super().create_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - new_agent = await super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - return new_agent - - # pylint: disable=arguments-differ - @overload - async def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. 
- :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.agents.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. 
Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - agentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.agentsApiResponseFormatMode - or ~azure.ai.agents.models.agentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: agent. The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - async def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. 
- :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - agentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.agentsApiResponseFormatMode - or ~azure.ai.agents.models.agentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: agent. 
The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: agent. The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: agent. 
The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_agent( - self, - agent_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. 
- :paramtype tool_resources: ~azure.ai.agents.models.ToolResources - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - agentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.agentsApiResponseFormatMode - or ~azure.ai.agents.models.agentsApiResponseFormat - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: agent. 
The agent is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return await super().update_agent(body=body, content_type=content_type, **kwargs) - return await super().update_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - return await super().update_agent( - agent_id=agent_id, - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def _validate_tools_and_tool_resources( - self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] - ): - if tool_resources is None: - return - if tools is None: - tools = [] - - if tool_resources.file_search is not None and not any( - isinstance(tool, _models.FileSearchToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" - ) - if tool_resources.code_interpreter is not None and not any( - isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" - ) - - @distributed_trace_async - async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - return await super().delete_agent(agent_id, **kwargs) - - @distributed_trace - def enable_auto_function_calls( # pylint: disable=client-method-missing-kwargs - self, - tools: Union[Set[Callable[..., Any]], _models.AsyncFunctionTool, _models.AsyncToolSet], - max_retry: int = 10, - ) -> None: - """Enables tool calls to be executed automatically during runs.create_and_process or runs.stream. - If this is not set, functions must be called manually. - If automatic function calls fail, the agents will receive error messages allowing it to retry with another - function call or figure out the answer with its knowledge. - - :param tools: A function tool, toolset, or a set of callable functions. - :type tools: Union[Set[Callable[..., Any]], _models.AsyncFunctionTool, _models.AsyncToolSet] - :param max_retry: Maximum number of errors allowed and retry per run or stream. Default value is 10. 
- :type max_retry: int - """ - if isinstance(tools, _models.AsyncFunctionTool): - self._function_tool = tools - elif isinstance(tools, _models.AsyncToolSet): - tool = tools.get_tool(_models.AsyncFunctionTool) - self._function_tool = tool - else: - self._function_tool = _models.AsyncFunctionTool(tools) - - self._function_tool_max_retry = max_retry - - # Propagate into the RunsOperations instance - # pylint: disable=protected-access - self.runs._function_tool = self._function_tool - self.runs._function_tool_max_retry = self._function_tool_max_retry - # pylint: enable=protected-access - - @overload - async def create_thread_and_run( - self, - *, - agent_id: str, - content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using that new thread. - - :keyword agent_id: The ID of the agent for which the thread should be created. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type for JSON body. Default is "application/json". - :type content_type: str - :keyword thread: The details used to create the new thread. If none provided, an empty thread is - created. 
- :type thread: ~azure.ai.agents.models.AgentThreadCreationOptions - :keyword model: Override the model the agent uses for this run. - :type model: str - :keyword instructions: Override the system instructions for this run. - :type instructions: str - :keyword tools: Override the list of enabled tools for this run. - :type tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. - :type tool_resources: ~azure.ai.agents.models.UpdateToolResourcesOptions - :keyword temperature: Sampling temperature between 0 and 2. Higher = more random. - :type temperature: float - :keyword top_p: Nucleus sampling parameter between 0 and 1. - :type top_p: float - :keyword max_prompt_tokens: Max prompt tokens to use across the run. - :type max_prompt_tokens: int - :keyword max_completion_tokens: Max completion tokens to use across the run. - :type max_completion_tokens: int - :keyword truncation_strategy: Strategy for dropping old messages as context grows. - :type truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls which tool the model will call. - :type tool_choice: str or - ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format the model must output. - :type response_format: str or - ~azure.ai.agents.models.AgentsApiResponseFormatMode or - ~azure.ai.agents.models.AgentsApiResponseFormat or - ~azure.ai.agents.models.ResponseFormatJsonSchemaType - :keyword parallel_tool_calls: If True, tools will be invoked in parallel. - :type parallel_tool_calls: bool - :keyword metadata: Up to 16 key/value pairs for structured metadata on the run. - :type metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. 
- :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using a JSON body. - - :param body: The request payload as a JSON-serializable dict. - :type body: JSON - :keyword content_type: Body Parameter content-type for JSON body. Default is "application/json". - :type content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread_and_run( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using a binary body. - - :param body: The request payload as a byte-stream. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type for binary body. Default is "application/json". - :type content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. 
- :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_thread_and_run( # type: ignore - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional[_types.AgentsToolChoiceOption] = None, - response_format: Optional[_types.AgentsResponseFormatOption] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """ - Creates a new agent thread and immediately starts a run using the specified parameters. - - :param body: Either a JSON payload (dict) or a binary stream (IO[bytes]). Use JSON overload for - dict bodies and binary overload for IO[bytes]. - :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent for which the thread should be created. - Required when not using the JSON/body overload. - :type agent_id: str - :keyword thread: The details used to create the new thread. If none provided, an empty thread is - created. - :type thread: ~azure.ai.agents.models.AgentThreadCreationOptions - :keyword model: Override the model the agent uses for this run. - :type model: str - :keyword instructions: Override the system instructions for this run. - :type instructions: str - :keyword tools: Override the list of enabled tools for this run. 
- :type tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. - :type tool_resources: ~azure.ai.agents.models.UpdateToolResourcesOptions - :keyword temperature: Sampling temperature between 0 and 2. Higher = more random. - :type temperature: float - :keyword top_p: Nucleus sampling parameter between 0 and 1. - :type top_p: float - :keyword max_prompt_tokens: Max prompt tokens to use across the run. - :type max_prompt_tokens: int - :keyword max_completion_tokens: Max completion tokens to use across the run. - :type max_completion_tokens: int - :keyword truncation_strategy: Strategy for dropping old messages as context grows. - :type truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls which tool the model will call. - :type tool_choice: str or - ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format the model must output. - :type response_format: str or - ~azure.ai.agents.models.AgentsApiResponseFormatMode or - ~azure.ai.agents.models.AgentsApiResponseFormat or - ~azure.ai.agents.models.ResponseFormatJsonSchemaType - :keyword parallel_tool_calls: If True, tools will be invoked in parallel. - :type parallel_tool_calls: bool - :keyword metadata: Up to 16 key/value pairs for structured metadata on the run. - :type metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping. - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ValueError: If the combination of arguments is invalid. 
- :raises ~azure.core.exceptions.HttpResponseError: - """ - - # JSON-body overload - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - return await super().create_thread_and_run(body, content_type=content_type, **kwargs) # JSON payload - - # Binary-body overload - if isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - return await super().create_thread_and_run(body, content_type=content_type, **kwargs) # binary stream - - # Keyword-only overload - if agent_id is not _Unset: - return await super().create_thread_and_run( - agent_id=agent_id, - thread=thread, - model=model, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - stream_parameter=False, # force none-streaming - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - # Nothing matched - raise ValueError( - "Invalid arguments for create_thread_and_run(). " - "Provide either a JSON dict, a binary IO[bytes], or keyword parameters including 'agent_id'." 
- ) - - @distributed_trace_async - async def create_thread_and_process_run( - self, - *, - agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """ - Creates a new agent thread and run in one call, then polls until the run enters a terminal - state, executing any required tool calls via the provided ToolSet. - - :keyword agent_id: The unique identifier of the agent to run. Required if `body` is unset. - :type agent_id: str - :keyword thread: Options for creating the new thread (initial messages, metadata, tool resources). - :type thread: ~azure.ai.agents.models.AgentThreadCreationOptions - :keyword model: Optional override of the model deployment name to use for this run. - :type model: str, optional - :keyword instructions: Optional override of the system instructions for this run. - :type instructions: str, optional - :keyword toolset: A ToolSet instance containing both `.definitions` and `.resources` for tools. - If provided, its definitions/resources are used; otherwise no tools are passed. - :type toolset: azure.ai.agents._tools.ToolSet, optional - :keyword temperature: Sampling temperature for the model (0.0-2.0), higher is more random. - :type temperature: float, optional - :keyword top_p: Nucleus sampling value (0.0-1.0), alternative to temperature. 
- :type top_p: float, optional - :keyword max_prompt_tokens: Maximum total prompt tokens across turns; run ends "incomplete" if exceeded. - :type max_prompt_tokens: int, optional - :keyword max_completion_tokens: Maximum total completion tokens across turns; run ends "incomplete" if exceeded. - :type max_completion_tokens: int, optional - :keyword truncation_strategy: Strategy for dropping old messages when context window overflows. - :type truncation_strategy: ~azure.ai.agents.models.TruncationObject, optional - :keyword tool_choice: Controls which tool (if any) the model is allowed to call. - :type tool_choice: str or ~azure.ai.agents.models.AgentsToolChoiceOption, optional - :keyword response_format: Specifies the required format for the model's output. - :type response_format: str or ~azure.ai.agents.models.AgentsResponseFormatOption, optional - :keyword parallel_tool_calls: If True, allows tool calls to be executed in parallel. - :type parallel_tool_calls: bool, optional - :keyword metadata: Optional metadata (up to 16 key/value pairs) to attach to the run. - :type metadata: dict[str, str], optional - :keyword polling_interval: Seconds to wait between polling attempts for run status. Default is 1. - :type polling_interval: int, optional - :return: The final ThreadRun object, in a terminal state (succeeded, failed, or cancelled). - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - If the underlying REST call to create the thread+run or to poll fails. 
- """ - tools = toolset.definitions if toolset else None - tool_resources = toolset.resources if toolset else None - - run = await self.create_thread_and_run( - agent_id=agent_id, - thread=thread, - model=model, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - current_retry = 0 - # keep polling until we leave a "running" or "queued" or "requires_action" state - while run.status in ( - _models.RunStatus.QUEUED, - _models.RunStatus.IN_PROGRESS, - _models.RunStatus.REQUIRES_ACTION, - ): - await asyncio.sleep(polling_interval) - run = await self.runs.get(thread_id=run.thread_id, run_id=run.id) - - # If the model requests tool calls, execute and submit them - if run.status == _models.RunStatus.REQUIRES_ACTION and isinstance( - run.required_action, _models.SubmitToolOutputsAction - ): - tool_calls = run.required_action.submit_tool_outputs.tool_calls or [] - - if not tool_calls: - logger.warning("No tool calls provided – cancelling run") - run = await self.runs.cancel(thread_id=run.thread_id, run_id=run.id) - break - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = _models.AsyncToolSet() - toolset.add(self._function_tool) - tool_outputs = await toolset.execute_tool_calls(tool_calls) - - if _has_errors_in_toolcalls_output(tool_outputs): - if current_retry >= self._function_tool_max_retry: # pylint:disable=no-else-return - logger.warning( - "Tool outputs contain errors - reaching max retry %s", self._function_tool_max_retry - ) - return await self.runs.cancel(thread_id=run.thread_id, run_id=run.id) - else: - logger.warning("Tool outputs contain errors - retrying") - current_retry += 1 - - logger.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - run2 = await self.runs.submit_tool_outputs( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs - ) - logger.debug("Tool outputs submitted to run: %s", run2.id) - - logger.debug("Current run ID: %s with status: %s", run.id, run.status) - - return run - - -__all__: List[str] = ["AgentsClient"] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py index 15a241391441..a6fecc701226 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py @@ -43,8 +43,9 @@ from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict -from ... import models as _models -from ..._utils.model_base import Model as _Model, SdkJSONEncoder, _deserialize +from ... 
import models as _models2 +from ......servicepatterns import models as _servicepatterns_models6 +from ..._utils.model_base import Model as _Model, SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._utils.serialization import Deserializer, Serializer from ..._utils.utils import ClientMixinABC, prepare_multipart_form_data from ...operations._operations import ( @@ -122,11 +123,11 @@ async def create( self, *, content_type: str = "application/json", - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, + messages: Optional[List[_models2.ThreadMessageOptions]] = None, + tool_resources: Optional[_models2.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -152,7 +153,9 @@ async def create( """ @overload - async def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.AgentThread: + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models2.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :param body: Required. @@ -168,7 +171,7 @@ async def create(self, body: JSON, *, content_type: str = "application/json", ** @overload async def create( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :param body: Required. 
@@ -186,11 +189,11 @@ async def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, + messages: Optional[List[_models2.ThreadMessageOptions]] = None, + tool_resources: Optional[_models2.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :param body: Is either a JSON type or a IO[bytes] type. Required. @@ -225,7 +228,7 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models2.AgentThread] = kwargs.pop("cls", None) if body is _Unset: body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} @@ -263,12 +266,13 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models2.AgentThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -280,10 +284,10 @@ def list( self, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.AgentThread"]: + ) -> AsyncIterable["_models2.AgentThread"]: """Gets a list of threads that were 
previously created. :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and @@ -304,7 +308,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.AgentThread]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.AgentThread]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -333,7 +337,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.AgentThread], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.AgentThread], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -349,14 +353,15 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + async def get(self, thread_id: str, **kwargs: Any) -> _models2.AgentThread: """Gets information about an existing thread. :param thread_id: Identifier of the thread. Required. 
@@ -376,7 +381,7 @@ async def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models2.AgentThread] = kwargs.pop("cls", None) _request = build_threads_get_request( thread_id=thread_id, @@ -403,12 +408,13 @@ async def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models2.AgentThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -421,10 +427,10 @@ async def update( thread_id: str, *, content_type: str = "application/json", - tool_resources: Optional[_models.ToolResources] = None, + tool_resources: Optional[_models2.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -451,7 +457,7 @@ async def update( @overload async def update( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. 
@@ -469,7 +475,7 @@ async def update( @overload async def update( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -490,10 +496,10 @@ async def update( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - tool_resources: Optional[_models.ToolResources] = None, + tool_resources: Optional[_models2.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models2.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -527,7 +533,7 @@ async def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models2.AgentThread] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata, "tool_resources": tool_resources} @@ -566,12 +572,13 @@ async def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models2.AgentThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -579,7 +586,7 @@ async def update( return deserialized # type: ignore @distributed_trace_async - async def delete(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + async def delete(self, thread_id: str, **kwargs: Any) -> 
_models2.ThreadDeletionStatus: """Deletes an existing thread. :param thread_id: Identifier of the thread. Required. @@ -599,7 +606,7 @@ async def delete(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionS _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadDeletionStatus] = kwargs.pop("cls", None) _request = build_threads_delete_request( thread_id=thread_id, @@ -626,12 +633,13 @@ async def delete(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionS except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + deserialized = _deserialize(_models2.ThreadDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -661,13 +669,13 @@ async def create( self, thread_id: str, *, - role: Union[str, _models.MessageRole], + role: Union[str, _models2.MessageRole], content: "_types.MessageInputContent", content_type: str = "application/json", - attachments: Optional[List[_models.MessageAttachment]] = None, + attachments: Optional[List[_models2.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. 
@@ -702,7 +710,7 @@ async def create( @overload async def create( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. @@ -720,7 +728,7 @@ async def create( @overload async def create( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. @@ -741,12 +749,12 @@ async def create( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - role: Union[str, _models.MessageRole] = _Unset, + role: Union[str, _models2.MessageRole] = _Unset, content: "_types.MessageInputContent" = _Unset, - attachments: Optional[List[_models.MessageAttachment]] = None, + attachments: Optional[List[_models2.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. 
@@ -788,7 +796,7 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadMessage] = kwargs.pop("cls", None) if body is _Unset: if role is _Unset: @@ -831,12 +839,13 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) + deserialized = _deserialize(_models2.ThreadMessage, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -850,10 +859,10 @@ def list( *, run_id: Optional[str] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.ThreadMessage"]: + ) -> AsyncIterable["_models2.ThreadMessage"]: """Gets a list of messages that exist on a thread. :param thread_id: Identifier of the thread. Required. 
@@ -878,7 +887,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.ThreadMessage]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.ThreadMessage]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -909,7 +918,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.ThreadMessage], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.ThreadMessage], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -932,7 +941,7 @@ async def get_next(_continuation_token=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + async def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models2.ThreadMessage: """Retrieves an existing message. :param thread_id: Identifier of the thread. Required. 
@@ -954,7 +963,7 @@ async def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.T _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadMessage] = kwargs.pop("cls", None) _request = build_messages_get_request( thread_id=thread_id, @@ -982,12 +991,13 @@ async def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.T except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) + deserialized = _deserialize(_models2.ThreadMessage, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1003,7 +1013,7 @@ async def update( content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. @@ -1026,7 +1036,7 @@ async def update( @overload async def update( self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. 
@@ -1046,7 +1056,7 @@ async def update( @overload async def update( self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. @@ -1072,7 +1082,7 @@ async def update( *, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models2.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. @@ -1102,7 +1112,7 @@ async def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadMessage] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata} @@ -1142,12 +1152,13 @@ async def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) + deserialized = _deserialize(_models2.ThreadMessage, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1178,25 +1189,25 @@ async def create( thread_id: str, *, agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models2.RunAdditionalFieldList]]] = None, content_type: str = "application/json", model: Optional[str] = None, instructions: Optional[str] = None, additional_instructions: Optional[str] = None, - 
additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, + additional_messages: Optional[List[_models2.ThreadMessageOptions]] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models2.TruncationObject] = None, tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. @@ -1291,10 +1302,10 @@ async def create( thread_id: str, body: JSON, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models2.RunAdditionalFieldList]]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. @@ -1320,10 +1331,10 @@ async def create( thread_id: str, body: IO[bytes], *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models2.RunAdditionalFieldList]]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. 
@@ -1350,24 +1361,24 @@ async def create( body: Union[JSON, IO[bytes]] = _Unset, *, agent_id: str = _Unset, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models2.RunAdditionalFieldList]]] = None, model: Optional[str] = None, instructions: Optional[str] = None, additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, + additional_messages: Optional[List[_models2.ThreadMessageOptions]] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models2.TruncationObject] = None, tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. 
@@ -1466,7 +1477,7 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: if agent_id is _Unset: @@ -1525,12 +1536,13 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models2.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1543,10 +1555,10 @@ def list( thread_id: str, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.ThreadRun"]: + ) -> AsyncIterable["_models2.ThreadRun"]: """Gets a list of runs for a specified thread. :param thread_id: Identifier of the thread. Required. 
@@ -1569,7 +1581,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.ThreadRun]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.ThreadRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -1599,7 +1611,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.ThreadRun], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.ThreadRun], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -1615,14 +1627,15 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + async def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models2.ThreadRun: """Gets an existing run from an existing thread. :param thread_id: Identifier of the thread. Required. 
@@ -1644,7 +1657,7 @@ async def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Threa _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadRun] = kwargs.pop("cls", None) _request = build_runs_get_request( thread_id=thread_id, @@ -1672,12 +1685,13 @@ async def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Threa except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models2.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1693,7 +1707,7 @@ async def update( content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. @@ -1716,7 +1730,7 @@ async def update( @overload async def update( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. @@ -1736,7 +1750,7 @@ async def update( @overload async def update( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. 
@@ -1762,7 +1776,7 @@ async def update( *, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. @@ -1792,7 +1806,7 @@ async def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata} @@ -1832,12 +1846,13 @@ async def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models2.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1850,11 +1865,11 @@ async def submit_tool_outputs( thread_id: str, run_id: str, *, - tool_outputs: List[_models.ToolOutput], + tool_outputs: List[_models2.ToolOutput], content_type: str = "application/json", stream_parameter: Optional[bool] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. @@ -1877,7 +1892,7 @@ async def submit_tool_outputs( @overload async def submit_tool_outputs( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. 
:param thread_id: Identifier of the thread. Required. @@ -1897,7 +1912,7 @@ async def submit_tool_outputs( @overload async def submit_tool_outputs( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. @@ -1921,10 +1936,10 @@ async def submit_tool_outputs( run_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - tool_outputs: List[_models.ToolOutput] = _Unset, + tool_outputs: List[_models2.ToolOutput] = _Unset, stream_parameter: Optional[bool] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. @@ -1954,7 +1969,7 @@ async def submit_tool_outputs( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: if tool_outputs is _Unset: @@ -1996,12 +2011,13 @@ async def submit_tool_outputs( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models2.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2009,7 +2025,7 @@ async def submit_tool_outputs( return deserialized # type: ignore @distributed_trace_async - async def 
cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + async def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models2.ThreadRun: """Cancels a run of an in‐progress thread. :param thread_id: Identifier of the thread. Required. @@ -2031,7 +2047,7 @@ async def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Th _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadRun] = kwargs.pop("cls", None) _request = build_runs_cancel_request( thread_id=thread_id, @@ -2059,12 +2075,13 @@ async def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Th except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models2.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2096,9 +2113,9 @@ async def get( run_id: str, step_id: str, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models2.RunAdditionalFieldList]]] = None, **kwargs: Any - ) -> _models.RunStep: + ) -> _models2.RunStep: """Retrieves a single run step from a thread run. :param thread_id: Identifier of the thread. Required. 
@@ -2127,7 +2144,7 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + cls: ClsType[_models2.RunStep] = kwargs.pop("cls", None) _request = build_run_steps_get_request( thread_id=thread_id, @@ -2157,12 +2174,13 @@ async def get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.RunStep, response.json()) + deserialized = _deserialize(_models2.RunStep, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2175,12 +2193,12 @@ def list( thread_id: str, run_id: str, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models2.RunAdditionalFieldList]]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.RunStep"]: + ) -> AsyncIterable["_models2.RunStep"]: """Gets a list of run steps from a thread run. :param thread_id: Identifier of the thread. Required. 
@@ -2210,7 +2228,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.RunStep]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.RunStep]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2242,7 +2260,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.RunStep], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.RunStep], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -2258,7 +2276,8 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2284,8 +2303,8 @@ def __init__(self, *args, **kwargs) -> None: @distributed_trace_async async def list( - self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any - ) -> _models.FileListResponse: + self, *, purpose: Optional[Union[str, _models2.FilePurpose]] = None, **kwargs: Any + ) -> _models2.FileListResponse: """Gets a list of previously uploaded files. :keyword purpose: The purpose of the file. 
Known values are: "fine-tune", "fine-tune-results", @@ -2307,7 +2326,7 @@ async def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + cls: ClsType[_models2.FileListResponse] = kwargs.pop("cls", None) _request = build_files_list_request( purpose=purpose, @@ -2334,12 +2353,13 @@ async def list( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileListResponse, response.json()) + deserialized = _deserialize(_models2.FileListResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2347,14 +2367,14 @@ async def list( return deserialized # type: ignore @overload - async def _upload_file(self, body: _models._models.UploadFileRequest, **kwargs: Any) -> _models.FileInfo: ... + async def _upload_file(self, body: _models2._models.UploadFileRequest, **kwargs: Any) -> _models2.FileInfo: ... @overload - async def _upload_file(self, body: JSON, **kwargs: Any) -> _models.FileInfo: ... + async def _upload_file(self, body: JSON, **kwargs: Any) -> _models2.FileInfo: ... @distributed_trace_async async def _upload_file( - self, body: Union[_models._models.UploadFileRequest, JSON], **kwargs: Any - ) -> _models.FileInfo: + self, body: Union[_models2._models.UploadFileRequest, JSON], **kwargs: Any + ) -> _models2.FileInfo: """Uploads a file for use by other operations. :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. 
@@ -2374,7 +2394,7 @@ async def _upload_file( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileInfo] = kwargs.pop("cls", None) + cls: ClsType[_models2.FileInfo] = kwargs.pop("cls", None) _body = body.as_dict() if isinstance(body, _Model) else body _file_fields: List[str] = ["file"] @@ -2407,12 +2427,13 @@ async def _upload_file( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileInfo, response.json()) + deserialized = _deserialize(_models2.FileInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2420,7 +2441,7 @@ async def _upload_file( return deserialized # type: ignore @distributed_trace_async - async def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + async def delete(self, file_id: str, **kwargs: Any) -> _models2.FileDeletionStatus: """Delete a previously uploaded file. :param file_id: The ID of the file to delete. Required. 
@@ -2440,7 +2461,7 @@ async def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatu _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models2.FileDeletionStatus] = kwargs.pop("cls", None) _request = build_files_delete_request( file_id=file_id, @@ -2467,12 +2488,13 @@ async def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatu except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + deserialized = _deserialize(_models2.FileDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2480,7 +2502,7 @@ async def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatu return deserialized # type: ignore @distributed_trace_async - async def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: + async def get(self, file_id: str, **kwargs: Any) -> _models2.FileInfo: """Returns information about a specific file. Does not retrieve file content. :param file_id: The ID of the file to retrieve. Required. 
@@ -2500,7 +2522,7 @@ async def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileInfo] = kwargs.pop("cls", None) + cls: ClsType[_models2.FileInfo] = kwargs.pop("cls", None) _request = build_files_get_request( file_id=file_id, @@ -2527,12 +2549,13 @@ async def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileInfo, response.json()) + deserialized = _deserialize(_models2.FileInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2587,7 +2610,8 @@ async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[ except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) deserialized = response.iter_bytes() @@ -2619,10 +2643,10 @@ def list( self, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.VectorStore"]: + ) -> AsyncIterable["_models2.VectorStore"]: """Returns a list of vector stores. :keyword limit: A limit on the number of objects to be returned. 
Limit can range between 1 and @@ -2643,7 +2667,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.VectorStore]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.VectorStore]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2672,7 +2696,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.VectorStore], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.VectorStore], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -2688,7 +2712,8 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2701,12 +2726,12 @@ async def create( content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, - store_configuration: Optional[_models.VectorStoreConfiguration] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + store_configuration: Optional[_models2.VectorStoreConfiguration] = None, + expires_after: Optional[_models2.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models2.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Creates a vector store. 
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -2736,7 +2761,9 @@ async def create( """ @overload - async def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.VectorStore: + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models2.VectorStore: """Creates a vector store. :param body: Required. @@ -2752,7 +2779,7 @@ async def create(self, body: JSON, *, content_type: str = "application/json", ** @overload async def create( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Creates a vector store. :param body: Required. @@ -2772,12 +2799,12 @@ async def create( *, file_ids: Optional[List[str]] = None, name: Optional[str] = None, - store_configuration: Optional[_models.VectorStoreConfiguration] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + store_configuration: Optional[_models2.VectorStoreConfiguration] = None, + expires_after: Optional[_models2.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models2.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Creates a vector store. :param body: Is either a JSON type or a IO[bytes] type. Required. 
@@ -2816,7 +2843,7 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStore] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -2861,12 +2888,13 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStore, response.json()) + deserialized = _deserialize(_models2.VectorStore, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2874,7 +2902,7 @@ async def create( return deserialized # type: ignore @distributed_trace_async - async def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + async def get(self, vector_store_id: str, **kwargs: Any) -> _models2.VectorStore: """Returns the vector store object matching the specified ID. :param vector_store_id: Identifier of the vector store. Required. 
@@ -2894,7 +2922,7 @@ async def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStore] = kwargs.pop("cls", None) _request = build_vector_stores_get_request( vector_store_id=vector_store_id, @@ -2921,12 +2949,13 @@ async def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStore, response.json()) + deserialized = _deserialize(_models2.VectorStore, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2940,10 +2969,10 @@ async def modify( *, content_type: str = "application/json", name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + expires_after: Optional[_models2.VectorStoreExpirationPolicy] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -2968,7 +2997,7 @@ async def modify( @overload async def modify( self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. 
@@ -2986,7 +3015,7 @@ async def modify( @overload async def modify( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -3008,10 +3037,10 @@ async def modify( body: Union[JSON, IO[bytes]] = _Unset, *, name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + expires_after: Optional[_models2.VectorStoreExpirationPolicy] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models2.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -3043,7 +3072,7 @@ async def modify( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStore] = kwargs.pop("cls", None) if body is _Unset: body = {"expires_after": expires_after, "metadata": metadata, "name": name} @@ -3082,12 +3111,13 @@ async def modify( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStore, response.json()) + deserialized = _deserialize(_models2.VectorStore, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3095,7 +3125,7 @@ async def modify( return deserialized # type: ignore @distributed_trace_async - async def delete(self, vector_store_id: str, **kwargs: Any) -> 
_models.VectorStoreDeletionStatus: + async def delete(self, vector_store_id: str, **kwargs: Any) -> _models2.VectorStoreDeletionStatus: """Deletes the vector store object matching the specified ID. :param vector_store_id: Identifier of the vector store. Required. @@ -3116,7 +3146,7 @@ async def delete(self, vector_store_id: str, **kwargs: Any) -> _models.VectorSto _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreDeletionStatus] = kwargs.pop("cls", None) _request = build_vector_stores_delete_request( vector_store_id=vector_store_id, @@ -3143,12 +3173,13 @@ async def delete(self, vector_store_id: str, **kwargs: Any) -> _models.VectorSto except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + deserialized = _deserialize(_models2.VectorStoreDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3178,12 +3209,12 @@ def list( self, vector_store_id: str, *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + filter: Optional[Union[str, _models2.VectorStoreFileStatusFilter]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.VectorStoreFile"]: + ) -> AsyncIterable["_models2.VectorStoreFile"]: """Returns a list of vector store files. 
:param vector_store_id: Identifier of the vector store. Required. @@ -3209,7 +3240,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.VectorStoreFile]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.VectorStoreFile]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3240,7 +3271,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.VectorStoreFile], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.VectorStoreFile], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -3256,7 +3287,8 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3269,10 +3301,10 @@ async def create( *, content_type: str = "application/json", file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_source: Optional[_models2.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models2.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models2.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3295,7 +3327,7 @@ async def create( @overload async def create( self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models2.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -3313,7 +3345,7 @@ async def create( @overload async def create( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models2.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -3335,10 +3367,10 @@ async def create( body: Union[JSON, IO[bytes]] = _Unset, *, file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_source: Optional[_models2.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models2.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models2.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3368,7 +3400,7 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreFile] = kwargs.pop("cls", None) if body is _Unset: body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} @@ -3407,12 +3439,13 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) + deserialized = _deserialize(_models2.VectorStoreFile, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3420,7 +3453,7 @@ async def create( return deserialized # type: ignore @distributed_trace_async - async def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + async def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models2.VectorStoreFile: """Retrieves a vector store file. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3442,7 +3475,7 @@ async def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _model _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreFile] = kwargs.pop("cls", None) _request = build_vector_store_files_get_request( vector_store_id=vector_store_id, @@ -3470,12 +3503,13 @@ async def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _model except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) + deserialized = _deserialize(_models2.VectorStoreFile, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3483,7 +3517,7 @@ async def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _model return deserialized # type: ignore @distributed_trace_async - async def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFileDeletionStatus: + async def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models2.VectorStoreFileDeletionStatus: """Deletes a vector store file. This removes the file‐to‐store link (does not delete the file itself). 
@@ -3507,7 +3541,7 @@ async def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _mo _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) _request = build_vector_store_files_delete_request( vector_store_id=vector_store_id, @@ -3535,12 +3569,13 @@ async def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _mo except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + deserialized = _deserialize(_models2.VectorStoreFileDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3572,10 +3607,10 @@ async def create( *, content_type: str = "application/json", file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_sources: Optional[List[_models2.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models2.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models2.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3598,7 +3633,7 @@ async def create( @overload async def create( self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models2.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. @@ -3616,7 +3651,7 @@ async def create( @overload async def create( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models2.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. @@ -3638,10 +3673,10 @@ async def create( body: Union[JSON, IO[bytes]] = _Unset, *, file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_sources: Optional[List[_models2.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models2.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models2.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3671,7 +3706,7 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreFileBatch] = kwargs.pop("cls", None) if body is _Unset: body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} @@ -3710,12 +3745,13 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + deserialized = _deserialize(_models2.VectorStoreFileBatch, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3723,7 +3759,7 @@ async def create( return deserialized # type: ignore @distributed_trace_async - async def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.VectorStoreFileBatch: + async def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models2.VectorStoreFileBatch: """Retrieve a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3745,7 +3781,7 @@ async def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _mode _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreFileBatch] = kwargs.pop("cls", None) _request = build_vector_store_file_batches_get_request( vector_store_id=vector_store_id, @@ -3773,12 +3809,13 @@ async def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _mode except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + deserialized = _deserialize(_models2.VectorStoreFileBatch, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3786,7 +3823,7 @@ async def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _mode return deserialized # type: ignore @distributed_trace_async - async def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.VectorStoreFileBatch: + async def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models2.VectorStoreFileBatch: """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. 
@@ -3809,7 +3846,7 @@ async def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _m _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + cls: ClsType[_models2.VectorStoreFileBatch] = kwargs.pop("cls", None) _request = build_vector_store_file_batches_cancel_request( vector_store_id=vector_store_id, @@ -3837,12 +3874,13 @@ async def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _m except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + deserialized = _deserialize(_models2.VectorStoreFileBatch, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3855,12 +3893,12 @@ def list_files( vector_store_id: str, batch_id: str, *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + filter: Optional[Union[str, _models2.VectorStoreFileStatusFilter]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.VectorStoreFile"]: + ) -> AsyncIterable["_models2.VectorStoreFile"]: """Returns a list of vector store files in a batch. :param vector_store_id: Identifier of the vector store. Required. 
@@ -3888,7 +3926,7 @@ def list_files( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.VectorStoreFile]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.VectorStoreFile]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3908,6 +3946,7 @@ def prepare_request(_continuation_token=None): order=order, after=_continuation_token, before=before, + api_version=self._config.api_version, headers=_headers, params=_params, ) @@ -3919,7 +3958,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.VectorStoreFile], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.VectorStoreFile], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -3935,14 +3974,17 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return AsyncItemPaged(get_next, extract_data) -class AgentsClientOperationsMixin(ClientMixinABC[AsyncPipelineClient, AgentsClientConfiguration]): +class AgentsClientOperationsMixin( + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], AgentsClientConfiguration] +): @overload async def create_agent( @@ -3953,14 +3995,14 @@ async def create_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: 
Optional[List[_models2.ToolDefinition]] = None, + tool_resources: Optional[_models2.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Creates a new agent. :keyword model: The ID of the model to use. Required. @@ -4009,7 +4051,9 @@ async def create_agent( """ @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + async def create_agent( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models2.Agent: """Creates a new agent. :param body: Required. @@ -4025,7 +4069,7 @@ async def create_agent(self, body: JSON, *, content_type: str = "application/jso @overload async def create_agent( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Creates a new agent. :param body: Required. @@ -4047,14 +4091,14 @@ async def create_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, + tool_resources: Optional[_models2.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Creates a new agent. :param body: Is either a JSON type or a IO[bytes] type. Required. 
@@ -4112,7 +4156,7 @@ async def create_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models2.Agent] = kwargs.pop("cls", None) if body is _Unset: if model is _Unset: @@ -4163,12 +4207,13 @@ async def create_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models2.Agent, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4180,10 +4225,10 @@ def list_agents( self, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models2.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncIterable["_models.Agent"]: + ) -> AsyncIterable["_models2.Agent"]: """Gets a list of agents that were previously created. :keyword limit: A limit on the number of objects to be returned. 
Limit can range between 1 and @@ -4204,7 +4249,7 @@ def list_agents( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Agent]] = kwargs.pop("cls", None) + cls: ClsType[List[_models2.Agent]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4233,7 +4278,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Agent], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models2.Agent], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, AsyncList(list_of_elem) @@ -4249,14 +4294,15 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: + async def get_agent(self, agent_id: str, **kwargs: Any) -> _models2.Agent: """Retrieves an existing agent. :param agent_id: Identifier of the agent. Required. 
@@ -4276,7 +4322,7 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models2.Agent] = kwargs.pop("cls", None) _request = build_agents_get_agent_request( agent_id=agent_id, @@ -4303,12 +4349,13 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models2.Agent, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4325,14 +4372,14 @@ async def update_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, + tool_resources: Optional[_models2.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Modifies an existing agent. :param agent_id: The ID of the agent to modify. Required. @@ -4387,7 +4434,7 @@ async def update_agent( @overload async def update_agent( self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Modifies an existing agent. 
:param agent_id: The ID of the agent to modify. Required. @@ -4405,7 +4452,7 @@ async def update_agent( @overload async def update_agent( self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Modifies an existing agent. :param agent_id: The ID of the agent to modify. Required. @@ -4430,14 +4477,14 @@ async def update_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, + tool_resources: Optional[_models2.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models2.Agent: """Modifies an existing agent. :param agent_id: The ID of the agent to modify. Required. 
@@ -4499,7 +4546,7 @@ async def update_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models2.Agent] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -4549,12 +4596,13 @@ async def update_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models2.Agent, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4562,7 +4610,7 @@ async def update_agent( return deserialized # type: ignore @distributed_trace_async - async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models2.AgentDeletionStatus: """Deletes an agent. :param agent_id: Identifier of the agent. Required. 
@@ -4582,7 +4630,7 @@ async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDelet _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models2.AgentDeletionStatus] = kwargs.pop("cls", None) _request = build_agents_delete_agent_request( agent_id=agent_id, @@ -4609,12 +4657,13 @@ async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDelet except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + deserialized = _deserialize(_models2.AgentDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4627,23 +4676,23 @@ async def create_thread_and_run( *, agent_id: str, content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, + thread: Optional[_models2.AgentThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, + tool_resources: Optional[_models2.UpdateToolResourcesOptions] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models2.TruncationObject] = None, 
tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :keyword agent_id: The ID of the agent for which the thread should be created. Required. @@ -4727,7 +4776,7 @@ async def create_thread_and_run( @overload async def create_thread_and_run( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :param body: Required. @@ -4743,7 +4792,7 @@ async def create_thread_and_run( @overload async def create_thread_and_run( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :param body: Required. 
@@ -4762,23 +4811,23 @@ async def create_thread_and_run( body: Union[JSON, IO[bytes]] = _Unset, *, agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, + thread: Optional[_models2.AgentThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + tools: Optional[List[_models2.ToolDefinition]] = None, + tool_resources: Optional[_models2.UpdateToolResourcesOptions] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models2.TruncationObject] = None, tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models2.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :param body: Is either a JSON type or a IO[bytes] type. Required. 
@@ -4869,7 +4918,7 @@ async def create_thread_and_run( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models2.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: if agent_id is _Unset: @@ -4926,12 +4975,13 @@ async def create_thread_and_run( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models2.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_patch.py b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_patch.py index e53d9385654b..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_patch.py @@ -1,2230 +1,15 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import ast -import asyncio # pylint: disable=do-not-import-asyncio -import io -import logging -import os -import sys -import time -import json -from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - AsyncIterator, - Dict, - List, - Optional, - Union, - cast, - overload, -) +from typing import List -from azure.core.tracing.decorator_async import distributed_trace_async - -from ... import models as _models -from ...models._enums import FilePurpose, RunStatus -from ._operations import FilesOperations as FilesOperationsGenerated -from ._operations import MessagesOperations as MessagesOperationsGenerated -from ._operations import RunsOperations as RunsOperationsGenerated -from ._operations import VectorStoresOperations as VectorStoresOperationsGenerated -from ._operations import VectorStoreFilesOperations as VectorStoreFilesOperationsGenerated -from ._operations import VectorStoreFileBatchesOperations as VectorStoreFileBatchesOperationsGenerated -from ..._utils.utils import FileType - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from ... import _types - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - -logger = logging.getLogger(__name__) - - -def _has_errors_in_toolcalls_output(tool_outputs: List[Dict]) -> bool: - """ - Check if any tool output contains an error. - - :param List[Dict] tool_outputs: A list of tool outputs to check. - :return: True if any output contains an error, False otherwise. 
- :rtype: bool - """ - for tool_output in tool_outputs: - output = tool_output.get("output") - if isinstance(output, str): - try: - output_json = json.loads(output) - if "error" in output_json: - return True - except json.JSONDecodeError: - continue - return False - - -class RunsOperations(RunsOperationsGenerated): - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - - # if the client didn't inject these for some reason, give safe defaults: - if not hasattr(self, "_function_tool"): - self._function_tool = _models.AsyncFunctionTool(set()) - if not hasattr(self, "_function_tool_max_retry"): - self._function_tool_max_retry = 0 - - # pylint: disable=arguments-differ - @overload - async def create( # pylint: disable=arguments-differ - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. 
- Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. 
- Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - thread_id: str, - body: IO[bytes], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. 
- :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - agent_id: str = _Unset, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent that should run the thread. Required. 
- :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. 
- :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. 
- :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create( - thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return await response - - @distributed_trace_async - async def create_and_process( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. 
- :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword toolset: The Collection of tools and resources (alternative to `tools` and - `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. 
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.agents.models.AgentsApiResponseFormatMode or - ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :keyword polling_interval: The time in seconds to wait between polling the service for run status. - Default value is 1. - :paramtype polling_interval: int - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - # Create and initiate the run with additional parameters - run = await self.create( - thread_id=thread_id, - agent_id=agent_id, - include=include, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=toolset.definitions if toolset else None, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - # Monitor and process the run status - current_retry = 0 - while run.status in [ - RunStatus.QUEUED, - RunStatus.IN_PROGRESS, - RunStatus.REQUIRES_ACTION, - ]: - await asyncio.sleep(polling_interval) - run = await self.get(thread_id=thread_id, run_id=run.id) - - if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.warning("No tool calls provided - cancelling run") - await self.cancel(thread_id=thread_id, run_id=run.id) - break - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = _models.AsyncToolSet() - toolset.add(self._function_tool) - tool_outputs = await toolset.execute_tool_calls(tool_calls) - - if _has_errors_in_toolcalls_output(tool_outputs): - if current_retry >= self._function_tool_max_retry: # pylint:disable=no-else-return - logger.warning( - "Tool outputs contain errors - reaching max retry %s", self._function_tool_max_retry - ) - return await self.cancel(thread_id=thread_id, run_id=run.id) - else: - logger.warning("Tool outputs contain errors - retrying") - current_retry += 1 - - logger.debug("Tool outputs: %s", tool_outputs) - if tool_outputs: - run2 = await self.submit_tool_outputs( - thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs - ) - logger.debug("Tool outputs submitted to run: %s", run2.id) - - logger.debug("Current run ID: %s with status: %s", run.id, run.status) - - return run - - @overload - async def stream( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: None = None, - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.AsyncAgentEventHandler]: - """Creates a new stream for an agent thread. 
- - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. 
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: None - :paramtype event_handler: None. _models.AsyncAgentEventHandler will be applied as default. - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def stream( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, 
str]] = None, - event_handler: _models.BaseAsyncAgentEventHandlerT, - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. 
- :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.agents.models.AsyncAgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: None = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.AsyncAgentEventHandler]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. 
- :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword event_handler: None - :paramtype event_handler: None. _models.AsyncAgentEventHandler will be applied as default. - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: _models.BaseAsyncAgentEventHandlerT, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. 
- :paramtype event_handler: ~azure.ai.agents.models.AsyncAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.BaseAsyncAgentEventHandlerT] = None, - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. 
- Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. 
- :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. 
- :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.agents.models.AsyncAgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create( - thread_id, - agent_id=agent_id, - include=include, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - - if not event_handler: - event_handler = cast(_models.BaseAsyncAgentEventHandlerT, _models.AsyncAgentEventHandler()) - if isinstance(event_handler, _models.AsyncAgentEventHandler): - event_handler.set_max_retry(self._function_tool_max_retry) - - return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - # pylint: disable=arguments-differ - @overload - async def submit_tool_outputs( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. 
Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def submit_tool_outputs( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. 
- :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs( - thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return await response - - @overload - async def submit_tool_outputs_stream( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]], - *, - event_handler: _models.BaseAsyncAgentEventHandler, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. 
- :paramtype event_handler: ~azure.ai.agents.models.AsyncAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_stream( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: _models.BaseAsyncAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.agents.models.AsyncAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def submit_tool_outputs_stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: _models.BaseAsyncAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. 
terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.agents.models.AsyncAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs( - thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - # Cast the response to Iterator[bytes] for type correctness - response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - - event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) - - async def _to_async_iterator(self, byte_str: bytes) -> AsyncIterator[bytes]: - yield byte_str - - async def _handle_submit_tool_outputs( - self, run: _models.ThreadRun, event_handler: _models.BaseAsyncAgentEventHandler, submit_with_error: bool - ) -> Any: - tool_outputs: Any = [] - if isinstance(run.required_action, _models.SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.debug("No tool calls to execute.") - return 
tool_outputs - - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. - if ( - any(tool_call.type == "function" for tool_call in tool_calls) - and len(self._function_tool.definitions) > 0 - ): - toolset = _models.AsyncToolSet() - toolset.add(self._function_tool) - tool_outputs = await toolset.execute_tool_calls(tool_calls) - - if _has_errors_in_toolcalls_output(tool_outputs): - if submit_with_error: - logger.warning("Tool outputs contain errors - retrying") - else: - logger.warning("Tool outputs contain errors - reaching max retry limit") - response = await self.cancel(thread_id=run.thread_id, run_id=run.id) - response_json = ast.literal_eval(str(response)) - response_json_str = json.dumps(response_json) - - event_data_str = f"event: thread.run.cancelled\ndata: {response_json_str}" - byte_string = event_data_str.encode("utf-8") - - event_handler.initialize(self._to_async_iterator(byte_string), self._handle_submit_tool_outputs) - - return tool_outputs - logger.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - await self.submit_tool_outputs_stream( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler - ) - return tool_outputs - - -class FilesOperations(FilesOperationsGenerated): - - # pylint: disable=arguments-differ - @overload - async def upload( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :return: FileInfo. 
The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - async def upload( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.agents._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload(self, body: JSON, **kwargs: Any) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. 
Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: _models.FileInfo - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - # If a JSON body is provided directly, pass it along - if body is not None: - return await super()._upload_file(body=body, **kwargs) - - # Convert FilePurpose enum to string if necessary - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - # If file content is passed in directly - if file is not None and purpose is not None: - return await super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) - - # If a file path is provided - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # If no explicit filename is provided, use the base name - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return await super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}.") from e - - raise ValueError("Invalid parameters for upload_file. 
Please provide the necessary arguments.") - - @overload - async def upload_and_poll( - self, body: JSON, *, polling_interval: float = 1, timeout: Optional[float] = None, **kwargs: Any - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def upload_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.agents._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. 
- """ - - @overload - async def upload_and_poll( - self, - *, - file_path: str, - purpose: Union[str, _models.FilePurpose], - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @distributed_trace_async - async def upload_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. 
- :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: _models.FileInfo - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - :raises TimeoutError: If the polling times out. - """ - - curr_time = time.monotonic() - if body is not None: - uploaded_file = await self.upload(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = await self.upload(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = await self.upload(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." - ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. 
Stopping polling.") - - await asyncio.sleep(polling_interval) - uploaded_file = await self.get(uploaded_file.id) - - return uploaded_file - - @distributed_trace_async - async def get_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]: - """ - Asynchronously returns file content as a byte stream for the given file_id. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: An async iterator that yields bytes from the file content. - :rtype: AsyncIterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. - """ - kwargs["stream"] = True - response = await super()._get_file_content(file_id, **kwargs) - return cast(AsyncIterator[bytes], response) - - @distributed_trace_async - async def save(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: - """ - Asynchronously saves file content retrieved using a file identifier to the specified local directory. - - :param file_id: The unique identifier for the file to retrieve. - :type file_id: str - :param file_name: The name of the file to be saved. - :type file_name: str - :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: str or Path - :raises ValueError: If the target path is not a directory or the file name is invalid. - :raises RuntimeError: If file content retrieval fails or no content is found. - :raises TypeError: If retrieved chunks are not bytes-like objects. - :raises IOError: If writing to the file fails. 
- """ - try: - # Determine and validate the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - if not path.is_dir(): - raise ValueError(f"The target path '{path}' is not a directory.") - - # Sanitize and validate the file name - sanitized_file_name = Path(file_name).name - if not sanitized_file_name: - raise ValueError("The provided file name is invalid.") - - # Retrieve the file content - file_content_stream = await self.get_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - # Collect all chunks asynchronously - chunks = [] - async for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - chunks.append(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - target_file_path = path / sanitized_file_name - - # Write the collected content to the file synchronously - def write_file(collected_chunks: list): - with open(target_file_path, "wb") as file: - for chunk in collected_chunks: - file.write(chunk) - - # Use the event loop to run the synchronous function in a thread executor - loop = asyncio.get_running_loop() - await loop.run_in_executor(None, write_file, chunks) - - logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) - - except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error("An error occurred in save_file: %s", e) - raise - - -class VectorStoresOperations(VectorStoresOperationsGenerated): - - @overload - async def create_and_poll( - self, - body: JSON, - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
- Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def create_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.agents.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.agents.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). 
If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def create_and_poll( - self, - body: IO[bytes], - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. 
- """ - - @distributed_trace_async - async def create_and_poll( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.agents.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.agents.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. 
Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - curr_time = time.monotonic() - if body is not _Unset: - if isinstance(body, dict): - vector_store = await super().create( - body=body, content_type=content_type or "application/json", **kwargs - ) - elif isinstance(body, io.IOBase): - vector_store = await super().create(body=body, content_type=content_type, **kwargs) - else: - raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") - else: - store_configuration = None - if data_sources: - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) - - vector_store = await super().create( - file_ids=file_ids, - store_configuration=store_configuration, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - - while vector_store.status == "in_progress": - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. 
Stopping polling.") - - await asyncio.sleep(polling_interval) - vector_store = await super().get(vector_store.id) - - return vector_store - - -class VectorStoreFileBatchesOperations(VectorStoreFileBatchesOperationsGenerated): - - @overload - async def create_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def create_and_poll( - self, - vector_store_id: str, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. 
- :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.agents.models.VectorStoreDataSource] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def create_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. 
- :paramtype timeout: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @distributed_trace_async - async def create_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword content_type: Body parameter content-type. Defaults to "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - curr_time = time.monotonic() - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file_batch = await super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file_batch = await super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like (IO[bytes]).") - else: - vector_store_file_batch = await super().create( - vector_store_id=vector_store_id, - file_ids=file_ids, - data_sources=data_sources, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file_batch.status == "in_progress": - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. Stopping polling.") - - await asyncio.sleep(polling_interval) - vector_store_file_batch = await super().get( - vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id - ) - - return vector_store_file_batch - - -class VectorStoreFilesOperations(VectorStoreFilesOperationsGenerated): - - @overload - async def create_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
- Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def create_and_poll( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.agents.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. 
- :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. - """ - - @overload - async def create_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. 
- """ - - @distributed_trace_async - async def create_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.agents.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the polling times out. 
- """ - - curr_time = time.monotonic() - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file = await super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file = await super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") - else: - vector_store_file = await super().create( - vector_store_id=vector_store_id, - file_id=file_id, - data_source=data_source, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file.status == "in_progress": - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. Stopping polling.") - - await asyncio.sleep(polling_interval) - vector_store_file = await super().get(vector_store_id=vector_store_id, file_id=vector_store_file.id) - - return vector_store_file - - -class MessagesOperations(MessagesOperationsGenerated): - - async def get_last_message_by_role( - self, - thread_id: str, - role: _models.MessageRole, - **kwargs, - ) -> Optional[_models.ThreadMessage]: - """ - Return the most-recent message in *thread_id* authored by *role*. - - The implementation streams messages (newest first, where the - service/SDK supports that) and stops at the first match. - - :param thread_id: The ID of the thread to search. - :type thread_id: str - :param role: The role of the message author. - :type role: ~azure.ai.agents.models.MessageRole - - :return: The most recent message authored by *role* in the thread, or None if no such message exists. 
- :rtype: Optional[~azure.ai.agents.models.ThreadMessage] - """ - pageable = self.list(thread_id, **kwargs) - - async for message in pageable: - if message.role == role: - return message - return None - - async def get_last_text_message_by_role( - self, - thread_id: str, - role: _models.MessageRole, - **kwargs, - ) -> Optional[_models.MessageTextContent]: - """ - Return the most-recent *text* message in *thread_id* authored by *role*. - - :param thread_id: The ID of the thread to search. - :type thread_id: str - :param role: The role of the message author. - :type role: ~azure.ai.agents.models.MessageRole - - :return: The most recent text message authored by *role* in the thread, or None if no such message exists. - :rtype: Optional[~azure.ai.agents.models.MessageTextContent] - """ - msg = await self.get_last_message_by_role(thread_id, role, **kwargs) - if msg: - text_contents = msg.text_messages - if text_contents: - return text_contents[-1] - return None - - -__all__: List[str] = [ - "MessagesOperations", - "RunsOperations", - "FilesOperations", - "VectorStoresOperations", - "VectorStoreFilesOperations", - "VectorStoreFileBatchesOperations", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py index e7cdeca4c2f4..cde5b21b35d0 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py @@ -27,9 +27,6 @@ AzureFunctionDefinition, AzureFunctionStorageQueue, AzureFunctionToolDefinition, - BingCustomSearchConfiguration, - BingCustomSearchConfigurationList, - BingCustomSearchToolDefinition, BingGroundingSearchConfiguration, BingGroundingSearchConfigurationList, BingGroundingToolDefinition, @@ -83,7 +80,6 @@ 
MessageTextFilePathDetails, MessageTextUrlCitationAnnotation, MessageTextUrlCitationDetails, - MicrosoftFabricToolDefinition, OpenApiAnonymousAuthDetails, OpenApiAuthDetails, OpenApiConnectionAuthDetails, @@ -102,7 +98,6 @@ RunError, RunStep, RunStepAzureAISearchToolCall, - RunStepBingCustomSearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterImageReference, @@ -136,20 +131,15 @@ RunStepFunctionToolCallDetails, RunStepMessageCreationDetails, RunStepMessageCreationReference, - RunStepMicrosoftFabricToolCall, RunStepOpenAPIToolCall, - RunStepSharepointToolCall, RunStepToolCall, RunStepToolCallDetails, - SharepointToolDefinition, SubmitToolOutputsAction, SubmitToolOutputsDetails, ThreadDeletionStatus, ThreadMessage, ThreadMessageOptions, ThreadRun, - ToolConnection, - ToolConnectionList, ToolDefinition, ToolOutput, ToolResources, @@ -234,9 +224,6 @@ "AzureFunctionDefinition", "AzureFunctionStorageQueue", "AzureFunctionToolDefinition", - "BingCustomSearchConfiguration", - "BingCustomSearchConfigurationList", - "BingCustomSearchToolDefinition", "BingGroundingSearchConfiguration", "BingGroundingSearchConfigurationList", "BingGroundingToolDefinition", @@ -290,7 +277,6 @@ "MessageTextFilePathDetails", "MessageTextUrlCitationAnnotation", "MessageTextUrlCitationDetails", - "MicrosoftFabricToolDefinition", "OpenApiAnonymousAuthDetails", "OpenApiAuthDetails", "OpenApiConnectionAuthDetails", @@ -309,7 +295,6 @@ "RunError", "RunStep", "RunStepAzureAISearchToolCall", - "RunStepBingCustomSearchToolCall", "RunStepBingGroundingToolCall", "RunStepCodeInterpreterImageOutput", "RunStepCodeInterpreterImageReference", @@ -343,20 +328,15 @@ "RunStepFunctionToolCallDetails", "RunStepMessageCreationDetails", "RunStepMessageCreationReference", - "RunStepMicrosoftFabricToolCall", "RunStepOpenAPIToolCall", - "RunStepSharepointToolCall", "RunStepToolCall", "RunStepToolCallDetails", - "SharepointToolDefinition", "SubmitToolOutputsAction", 
"SubmitToolOutputsDetails", "ThreadDeletionStatus", "ThreadMessage", "ThreadMessageOptions", "ThreadRun", - "ToolConnection", - "ToolConnectionList", "ToolDefinition", "ToolOutput", "ToolResources", diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py index 77e4aeb445b1..b86c3dc75319 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py @@ -19,12 +19,6 @@ class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Tool type ``code_interpreter``""" FILE_SEARCH = "file_search" """Tool type ``file_search``""" - BING_GROUNDING = "bing_grounding" - """Tool type ``bing_grounding``""" - MICROSOFT_FABRIC = "fabric_dataagent" - """Tool type ``fabric_dataagent``""" - SHAREPOINT = "sharepoint_grounding" - """Tool type ``sharepoint_grounding``""" AZURE_AI_SEARCH = "azure_ai_search" """Tool type ``azure_ai_search``""" BING_CUSTOM_SEARCH = "bing_custom_search" diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py index 1bb9fee68b9c..67dd42d30b39 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py @@ -191,9 +191,8 @@ class AgentsNamedToolChoice(_Model): """Specifies a tool the model should use. Use to force the model to call a specific tool. :ivar type: the type of tool. If type is ``function``, the function name must be set. Required. - Known values are: "function", "code_interpreter", "file_search", "bing_grounding", - "fabric_dataagent", "sharepoint_grounding", "azure_ai_search", "bing_custom_search", and - "connected_agent". + Known values are: "function", "code_interpreter", "file_search", "azure_ai_search", + "bing_custom_search", and "connected_agent". 
:vartype type: str or ~azure.ai.agents.models.AgentsNamedToolChoiceType :ivar function: The name of the function to call. :vartype function: ~azure.ai.agents.models.FunctionName @@ -203,9 +202,8 @@ class AgentsNamedToolChoice(_Model): visibility=["read", "create", "update", "delete", "query"] ) """the type of tool. If type is ``function``, the function name must be set. Required. Known - values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", - \"fabric_dataagent\", \"sharepoint_grounding\", \"azure_ai_search\", \"bing_custom_search\", - and \"connected_agent\".""" + values are: \"function\", \"code_interpreter\", \"file_search\", \"azure_ai_search\", + \"bing_custom_search\", and \"connected_agent\".""" function: Optional["_models.FunctionName"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function to call.""" @@ -472,10 +470,9 @@ class ToolDefinition(_Model): """An abstract representation of an input tool definition that an agent can use. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AzureAISearchToolDefinition, AzureFunctionToolDefinition, BingCustomSearchToolDefinition, - BingGroundingToolDefinition, CodeInterpreterToolDefinition, ConnectedAgentToolDefinition, - MicrosoftFabricToolDefinition, FileSearchToolDefinition, FunctionToolDefinition, - OpenApiToolDefinition, SharepointToolDefinition + AzureAISearchToolDefinition, AzureFunctionToolDefinition, BingGroundingToolDefinition, + CodeInterpreterToolDefinition, ConnectedAgentToolDefinition, FileSearchToolDefinition, + FunctionToolDefinition, OpenApiToolDefinition :ivar type: The object type. Required. Default value is None. :vartype type: str @@ -687,133 +684,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="azure_function", **kwargs) -class BingCustomSearchConfiguration(_Model): - """A bing custom search configuration. 
- - :ivar connection_id: Connection id for grounding with bing search. Required. - :vartype connection_id: str - :ivar instance_name: Name of the custom configuration instance given to config. Required. - :vartype instance_name: str - :ivar market: The market where the results come from. - :vartype market: str - :ivar set_lang: The language to use for user interface strings when calling Bing API. - :vartype set_lang: str - :ivar count: The number of search results to return in the bing api response. - :vartype count: int - :ivar freshness: Filter search results by a specific time range. Accepted values: - `https://learn.microsoft.com/bing/search-apis/bing-web-search/reference/query-parameters - `_. - :vartype freshness: str - """ - - connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Connection id for grounding with bing search. Required.""" - instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Name of the custom configuration instance given to config. Required.""" - market: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The market where the results come from.""" - set_lang: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The language to use for user interface strings when calling Bing API.""" - count: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The number of search results to return in the bing api response.""" - freshness: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Filter search results by a specific time range. 
Accepted values: - `https://learn.microsoft.com/bing/search-apis/bing-web-search/reference/query-parameters - `_.""" - - @overload - def __init__( - self, - *, - connection_id: str, - instance_name: str, - market: Optional[str] = None, - set_lang: Optional[str] = None, - count: Optional[int] = None, - freshness: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class BingCustomSearchConfigurationList(_Model): - """A list of search configurations currently used by the ``bing_custom_search`` tool. - - :ivar search_configurations: The connections attached to this tool. There can be a maximum of 1 - connection - resource attached to the tool. Required. - :vartype search_configurations: list[~azure.ai.agents.models.BingCustomSearchConfiguration] - """ - - search_configurations: List["_models.BingCustomSearchConfiguration"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The connections attached to this tool. There can be a maximum of 1 connection - resource attached to the tool. Required.""" - - @overload - def __init__( - self, - *, - search_configurations: List["_models.BingCustomSearchConfiguration"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class BingCustomSearchToolDefinition(ToolDefinition, discriminator="bing_custom_search"): - """The input definition information for a Bing custom search tool as used to configure an agent. - - :ivar type: The object type, which is always 'bing_custom_search'. Required. 
Default value is - "bing_custom_search". - :vartype type: str - :ivar bing_custom_search: The list of search configurations used by the bing custom search - tool. Required. - :vartype bing_custom_search: ~azure.ai.agents.models.BingCustomSearchConfigurationList - """ - - type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The object type, which is always 'bing_custom_search'. Required. Default value is - \"bing_custom_search\".""" - bing_custom_search: "_models.BingCustomSearchConfigurationList" = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The list of search configurations used by the bing custom search tool. Required.""" - - @overload - def __init__( - self, - *, - bing_custom_search: "_models.BingCustomSearchConfigurationList", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="bing_custom_search", **kwargs) - - class BingGroundingSearchConfiguration(_Model): """Search configuration for Bing Grounding. @@ -2834,42 +2704,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_dataagent"): - """The input definition information for a Microsoft Fabric tool as used to configure an agent. - - :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is - "fabric_dataagent". - :vartype type: str - :ivar fabric_dataagent: The list of connections used by the Microsoft Fabric tool. Required. 
- :vartype fabric_dataagent: ~azure.ai.agents.models.ToolConnectionList - """ - - type: Literal["fabric_dataagent"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The object type, which is always 'fabric_dataagent'. Required. Default value is - \"fabric_dataagent\".""" - fabric_dataagent: "_models.ToolConnectionList" = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The list of connections used by the Microsoft Fabric tool. Required.""" - - @overload - def __init__( - self, - *, - fabric_dataagent: "_models.ToolConnectionList", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="fabric_dataagent", **kwargs) - - class OpenApiAuthDetails(_Model): """authentication details for OpenApiFunctionDefinition. @@ -3006,6 +2840,8 @@ class OpenApiFunctionDefinition(_Model): :vartype auth: ~azure.ai.agents.models.OpenApiAuthDetails :ivar default_params: List of OpenAPI spec parameters that will use user-provided defaults. :vartype default_params: list[str] + :ivar functions: List of function definitions used by OpenApi tool. + :vartype functions: list[~azure.ai.agents.models.FunctionDefinition] """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -3019,6 +2855,8 @@ class OpenApiFunctionDefinition(_Model): """Open API authentication details. 
Required.""" default_params: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of OpenAPI spec parameters that will use user-provided defaults.""" + functions: Optional[List["_models.FunctionDefinition"]] = rest_field(visibility=["read"]) + """List of function definitions used by OpenApi tool.""" @overload def __init__( @@ -3572,9 +3410,8 @@ class RunStepToolCall(_Model): existing run. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepAzureAISearchToolCall, RunStepBingCustomSearchToolCall, RunStepBingGroundingToolCall, - RunStepCodeInterpreterToolCall, RunStepMicrosoftFabricToolCall, RunStepFileSearchToolCall, - RunStepFunctionToolCall, RunStepOpenAPIToolCall, RunStepSharepointToolCall + RunStepAzureAISearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterToolCall, + RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepOpenAPIToolCall :ivar type: The object type. Required. Default value is None. :vartype type: str @@ -3648,46 +3485,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="azure_ai_search", **kwargs) -class RunStepBingCustomSearchToolCall(RunStepToolCall, discriminator="bing_custom_search"): - """A record of a call to a bing custom search tool, issued by the model in evaluation of a defined - tool, that represents - executed search with bing custom search. - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is - "bing_custom_search". - :vartype type: str - :ivar bing_custom_search: Reserved for future use. Required. 
- :vartype bing_custom_search: dict[str, str] - """ - - type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The object type, which is always 'bing_custom_search'. Required. Default value is - \"bing_custom_search\".""" - bing_custom_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - bing_custom_search: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="bing_custom_search", **kwargs) - - class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"): """A record of a call to a bing grounding tool, issued by the model in evaluation of a defined tool, that represents @@ -4903,48 +4700,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="fabric_dataagent"): - """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined - tool, that represents - executed Microsoft Fabric operations. - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is - "fabric_dataagent". - :vartype type: str - :ivar microsoft_fabric: Reserved for future use. Required. 
- :vartype microsoft_fabric: dict[str, str] - """ - - type: Literal["fabric_dataagent"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The object type, which is always 'fabric_dataagent'. Required. Default value is - \"fabric_dataagent\".""" - microsoft_fabric: Dict[str, str] = rest_field( - name="fabric_dataagent", visibility=["read", "create", "update", "delete", "query"] - ) - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - microsoft_fabric: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="fabric_dataagent", **kwargs) - - class RunStepOpenAPIToolCall(RunStepToolCall, discriminator="openapi"): """A record of a call to an OpenAPI tool, issued by the model in evaluation of a defined tool, that represents @@ -4983,48 +4738,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="openapi", **kwargs) -class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint_grounding"): - """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, - that represents - executed SharePoint actions. - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is - "sharepoint_grounding". - :vartype type: str - :ivar share_point: Reserved for future use. Required. 
- :vartype share_point: dict[str, str] - """ - - type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The object type, which is always 'sharepoint_grounding'. Required. Default value is - \"sharepoint_grounding\".""" - share_point: Dict[str, str] = rest_field( - name="sharepoint_grounding", visibility=["read", "create", "update", "delete", "query"] - ) - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - share_point: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="sharepoint_grounding", **kwargs) - - class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): """The detailed information associated with a run step calling tools. @@ -5059,42 +4772,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) -class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint_grounding"): - """The input definition information for a sharepoint tool as used to configure an agent. - - :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is - "sharepoint_grounding". - :vartype type: str - :ivar sharepoint_grounding: The list of connections used by the SharePoint tool. Required. - :vartype sharepoint_grounding: ~azure.ai.agents.models.ToolConnectionList - """ - - type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The object type, which is always 'sharepoint_grounding'. Required. 
Default value is - \"sharepoint_grounding\".""" - sharepoint_grounding: "_models.ToolConnectionList" = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The list of connections used by the SharePoint tool. Required.""" - - @overload - def __init__( - self, - *, - sharepoint_grounding: "_models.ToolConnectionList", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="sharepoint_grounding", **kwargs) - - class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): """The details for required tool calls that must be submitted for an agent thread run to continue. @@ -5609,68 +5286,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["thread.run"] = "thread.run" -class ToolConnection(_Model): - """A connection resource. - - :ivar connection_id: A connection in a ToolConnectionList attached to this tool. Required. - :vartype connection_id: str - """ - - connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A connection in a ToolConnectionList attached to this tool. Required.""" - - @overload - def __init__( - self, - *, - connection_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ToolConnectionList(_Model): - """A set of connection resources currently used by either the ``bing_grounding``, - ``fabric_dataagent``, or ``sharepoint_grounding`` tools. - - :ivar connection_list: The connections attached to this tool. 
There can be a maximum of 1 - connection - resource attached to the tool. - :vartype connection_list: list[~azure.ai.agents.models.ToolConnection] - """ - - connection_list: Optional[List["_models.ToolConnection"]] = rest_field( - name="connections", visibility=["read", "create", "update", "delete", "query"] - ) - """The connections attached to this tool. There can be a maximum of 1 connection - resource attached to the tool.""" - - @overload - def __init__( - self, - *, - connection_list: Optional[List["_models.ToolConnection"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class ToolOutput(_Model): """The data provided during a tool outputs submission to resolve pending tool calls and allow the model to continue. diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_patch.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_patch.py index d9b7d9b3dcb7..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_patch.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_patch.py @@ -1,1826 +1,15 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio # pylint: disable = do-not-import-asyncio -import inspect -import itertools -import json -import logging -import re -from abc import ABC, abstractmethod -from typing import ( - Any, - AsyncIterator, - Awaitable, - Callable, - Dict, - Generic, - Iterator, - List, - Mapping, - Optional, - Set, - Tuple, - Type, - TypeVar, - Union, - cast, - get_args, - get_origin, - overload, -) +from typing import List -from ._enums import AgentStreamEvent, AzureAISearchQueryType -from ._models import ( - AISearchIndexResource, - AzureAISearchResource, - AzureAISearchToolDefinition, - AzureFunctionDefinition, - AzureFunctionStorageQueue, - AzureFunctionToolDefinition, - AzureFunctionBinding, - BingCustomSearchToolDefinition, - BingGroundingToolDefinition, - CodeInterpreterToolDefinition, - CodeInterpreterToolResource, - ConnectedAgentToolDefinition, - ConnectedAgentDetails, - FileSearchToolDefinition, - FileSearchToolResource, - FunctionDefinition, - FunctionToolDefinition, - MessageImageFileContent, - MessageTextContent, - MessageTextFileCitationAnnotation, - MessageTextUrlCitationAnnotation, - MessageTextFilePathAnnotation, - MicrosoftFabricToolDefinition, - OpenApiAuthDetails, - OpenApiToolDefinition, - OpenApiFunctionDefinition, - RequiredFunctionToolCall, - RunStep, - RunStepDeltaChunk, - BingCustomSearchConfiguration, - BingCustomSearchConfigurationList, - BingGroundingSearchConfiguration, - BingGroundingSearchConfigurationList, - SharepointToolDefinition, - SubmitToolOutputsAction, - ThreadRun, - ToolConnection, - ToolConnectionList, - ToolDefinition, - ToolResources, - MessageDeltaTextContent, - VectorStoreDataSource, -) - -from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated -from ._models import ThreadMessage as ThreadMessageGenerated -from ._models import MessageAttachment as MessageAttachmentGenerated - -from .. 
import _types - - -logger = logging.getLogger(__name__) - -StreamEventData = Union["MessageDeltaChunk", "ThreadMessage", ThreadRun, RunStep, str] - - -def _has_errors_in_toolcalls_output(tool_outputs: List[Dict]) -> bool: - """ - Check if any tool output contains an error. - - :param List[Dict] tool_outputs: A list of tool outputs to check. - :return: True if any output contains an error, False otherwise. - :rtype: bool - """ - for tool_output in tool_outputs: - output = tool_output.get("output") - if isinstance(output, str): - try: - output_json = json.loads(output) - if "error" in output_json: - return True - except json.JSONDecodeError: - continue - return False - - -def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: - """ - Remove the parameters, non present in class public fields; return shallow copy of a dictionary. - - **Note:** Classes inherited from the model check that the parameters are present - in the list of attributes and if they are not, the error is being raised. This check may not - be relevant for classes, not inherited from azure.ai.agents._model_base.Model. - :param Type model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :type parameters: Union[str, Dict[str, Any]] - :return: The dictionary with all invalid parameters removed. - :rtype: Dict[str, Any] - """ - new_params = {} - valid_parameters = set( - filter( - lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() - ) - ) - for k in filter(lambda x: x in valid_parameters, parameters.keys()): - new_params[k] = parameters[k] - return new_params - - -def _safe_instantiate( - model_class: Type, parameters: Union[str, Dict[str, Any]], *, generated_class: Optional[Type] = None -) -> StreamEventData: - """ - Instantiate class with the set of parameters from the server. - - :param Type model_class: The class of model to be used. 
- :param parameters: The parsed dictionary with parameters. - :type parameters: Union[str, Dict[str, Any]] - :keyword Optional[Type] generated_class: The optional generated type. - :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. - :rtype: Any - """ - if not generated_class: - generated_class = model_class - if not isinstance(parameters, dict): - return parameters - return cast(StreamEventData, model_class(**_filter_parameters(generated_class, parameters))) - - -def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]: - event_lines = event_data_str.strip().split("\n") - event_type: Optional[str] = None - event_data = "" - event_obj: StreamEventData - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data: Union[str, Dict[str, StreamEventData]] = cast(Dict[str, StreamEventData], json.loads(event_data)) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - parsed_data["expired_at"] = parsed_data.pop("expires_at") - - if isinstance(parsed_data, dict) and "assistant_id" in parsed_data: - parsed_data["agent_id"] = parsed_data.pop("assistant_id") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED.value, - AgentStreamEvent.THREAD_RUN_QUEUED.value, - AgentStreamEvent.THREAD_RUN_INCOMPLETE.value, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS.value, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION.value, - AgentStreamEvent.THREAD_RUN_COMPLETED.value, - AgentStreamEvent.THREAD_RUN_FAILED.value, - AgentStreamEvent.THREAD_RUN_CANCELLING.value, 
- AgentStreamEvent.THREAD_RUN_CANCELLED.value, - AgentStreamEvent.THREAD_RUN_EXPIRED.value, - }: - event_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED.value, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED.value, - AgentStreamEvent.THREAD_RUN_STEP_FAILED.value, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED.value, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED.value, - }: - event_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED.value, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED.value, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE.value, - }: - event_obj = _safe_instantiate(ThreadMessage, parsed_data, generated_class=ThreadMessageGenerated) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: - event_obj = _safe_instantiate(MessageDeltaChunk, parsed_data, generated_class=MessageDeltaChunkGenerated) - - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA.value: - event_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_obj = str(parsed_data) - - return event_type, event_obj - - -# Define type_map to translate Python type annotations to JSON Schema types -type_map = { - "str": "string", - "int": "integer", - "float": "number", - "bool": "boolean", - "NoneType": "null", - "list": "array", - "dict": "object", -} - - -def _map_type(annotation) -> Dict[str, Any]: # pylint: disable=too-many-return-statements - if annotation == inspect.Parameter.empty: - return {"type": "string"} # Default type if annotation is missing - - origin = get_origin(annotation) - - if origin in {list, List}: - args = get_args(annotation) - item_type = args[0] if args else str - return {"type": "array", "items": _map_type(item_type)} - if origin in {dict, Dict}: - return {"type": "object"} - if origin is Union: - args = 
get_args(annotation) - # If Union contains None, it is an optional parameter - if type(None) in args: - # If Union contains only one non-None type, it is a nullable parameter - non_none_args = [arg for arg in args if arg is not type(None)] - if len(non_none_args) == 1: - schema = _map_type(non_none_args[0]) - if "type" in schema: - if isinstance(schema["type"], str): - schema["type"] = [schema["type"], "null"] - elif "null" not in schema["type"]: - schema["type"].append("null") - else: - schema["type"] = ["null"] - return schema - # If Union contains multiple types, it is a oneOf parameter - return {"oneOf": [_map_type(arg) for arg in args]} - if isinstance(annotation, type): - schema_type = type_map.get(annotation.__name__, "string") - return {"type": schema_type} - - return {"type": "string"} # Fallback to "string" if type is unrecognized - - -def is_optional(annotation) -> bool: - origin = get_origin(annotation) - if origin is Union: - args = get_args(annotation) - return type(None) in args - return False - - -class MessageDeltaChunk(MessageDeltaChunkGenerated): - @property - def text(self) -> str: - """Get the text content of the delta chunk. - - :rtype: str - """ - if not self.delta or not self.delta.content: - return "" - return "".join( - content_part.text.value or "" - for content_part in self.delta.content - if isinstance(content_part, MessageDeltaTextContent) and content_part.text - ) - - -class ThreadMessage(ThreadMessageGenerated): - @property - def text_messages(self) -> List[MessageTextContent]: - """Returns all text message contents in the messages. - - :rtype: List[MessageTextContent] - """ - if not self.content: - return [] - return [content for content in self.content if isinstance(content, MessageTextContent)] - - @property - def image_contents(self) -> List[MessageImageFileContent]: - """Returns all image file contents from image message contents in the messages. 
- - :rtype: List[MessageImageFileContent] - """ - if not self.content: - return [] - return [content for content in self.content if isinstance(content, MessageImageFileContent)] - - @property - def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: - """Returns all file citation annotations from text message annotations in the messages. - - :rtype: List[MessageTextFileCitationAnnotation] - """ - if not self.content: - return [] - - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextFileCitationAnnotation) - ] - - @property - def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: - """Returns all file path annotations from text message annotations in the messages. - - :rtype: List[MessageTextFilePathAnnotation] - """ - if not self.content: - return [] - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextFilePathAnnotation) - ] - - @property - def url_citation_annotations(self) -> List[MessageTextUrlCitationAnnotation]: - """Returns all URL citation annotations from text message annotations in the messages. - - :rtype: List[MessageTextUrlCitationAnnotation] - """ - if not self.content: - return [] - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextUrlCitationAnnotation) - ] - - -class MessageAttachment(MessageAttachmentGenerated): - @overload - def __init__( - self, - *, - tools: List["FileSearchToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... 
- @overload - def __init__( - self, - *, - tools: List["CodeInterpreterToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - @overload - def __init__( - self, - *, - tools: List["_types.MessageAttachmentToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -ToolDefinitionT = TypeVar("ToolDefinitionT", bound=ToolDefinition) -ToolT = TypeVar("ToolT", bound="Tool") - - -class Tool(ABC, Generic[ToolDefinitionT]): - """ - An abstract class representing a tool that can be used by an agent. - """ - - @property - @abstractmethod - def definitions(self) -> List[ToolDefinitionT]: - """Get the tool definitions.""" - - @property - @abstractmethod - def resources(self) -> ToolResources: - """Get the tool resources.""" - - @abstractmethod - def execute(self, tool_call: Any) -> Any: - """ - Execute the tool with the provided tool call. - - :param Any tool_call: The tool call to execute. - :return: The output of the tool operations. - """ - - -class BaseFunctionTool(Tool[FunctionToolDefinition]): - """ - A tool that executes user-defined functions. - """ - - def __init__(self, functions: Set[Callable[..., Any]]): - """ - Initialize FunctionTool with a set of functions. - - :param functions: A set of function objects. - """ - self._functions = self._create_function_dict(functions) - self._definitions = self._build_function_definitions(self._functions) - - def add_functions(self, extra_functions: Set[Callable[..., Any]]) -> None: - """ - Add more functions into this FunctionTool existing function set. - If a function with the same name already exists, it is overwritten. 
- - :param extra_functions: A set of additional functions to be added to - the existing function set. Functions are defined as callables and - may have any number of arguments and return types. - :type extra_functions: Set[Callable[..., Any]] - """ - # Convert the existing dictionary of { name: function } back into a set - existing_functions = set(self._functions.values()) - # Merge old + new - combined = existing_functions.union(extra_functions) - # Rebuild state - self._functions = self._create_function_dict(combined) - self._definitions = self._build_function_definitions(self._functions) - - def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: - return {func.__name__: func for func in functions} - - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: - specs: List[FunctionToolDefinition] = [] - # Flexible regex to capture ':param : ' - param_pattern = re.compile( - r""" - ^\s* # Optional leading whitespace - :param # Literal ':param' - \s+ # At least one whitespace character - (?P[^:\s\(\)]+) # Parameter name (no spaces, colons, or parentheses) - (?:\s*\(\s*(?P[^)]+?)\s*\))? 
# Optional type in parentheses, allowing internal spaces - \s*:\s* # Colon ':' surrounded by optional whitespace - (?P.+) # Description (rest of the line) - """, - re.VERBOSE, - ) - - for name, func in functions.items(): - sig = inspect.signature(func) - params = sig.parameters - docstring = inspect.getdoc(func) or "" - description = docstring.split("\n", maxsplit=1)[0] if docstring else "No description" - - param_descriptions = {} - for line in docstring.splitlines(): - line = line.strip() - match = param_pattern.match(line) - if match: - groups = match.groupdict() - param_name = groups.get("name") - param_desc = groups.get("description") - param_desc = param_desc.strip() if param_desc else "No description" - param_descriptions[param_name] = param_desc.strip() - - properties = {} - required = [] - for param_name, param in params.items(): - param_type_info = _map_type(param.annotation) - param_description = param_descriptions.get(param_name, "No description") - - properties[param_name] = {**param_type_info, "description": param_description} - - # If the parameter has no default value and is not optional, add it to the required list - if param.default is inspect.Parameter.empty and not is_optional(param.annotation): - required.append(param_name) - - function_def = FunctionDefinition( - name=name, - description=description, - parameters={"type": "object", "properties": properties, "required": required}, - ) - tool_def = FunctionToolDefinition(function=function_def) - specs.append(tool_def) - - return specs - - def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: - function_name = tool_call.function.name - arguments = tool_call.function.arguments - - if function_name not in self._functions: - raise ValueError( - f"Function '{function_name}' not found. Provide this function " - f"to `enable_auto_function_calls` function call." 
- ) - - function = self._functions[function_name] - - try: - parsed_arguments = json.loads(arguments) - except json.JSONDecodeError as e: - raise ValueError(f"Invalid JSON arguments: {e}") from e - - if not isinstance(parsed_arguments, dict): - raise TypeError("Arguments must be a JSON object.") - - return function, parsed_arguments - - @property - def definitions(self) -> List[FunctionToolDefinition]: - """ - Get the function definitions. - - :return: A list of function definitions. - :rtype: List[ToolDefinition] - """ - return self._definitions - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as FunctionTool doesn't have specific resources. - :rtype: ToolResources - """ - return ToolResources() - - -class FunctionTool(BaseFunctionTool): - - def execute(self, tool_call: RequiredFunctionToolCall) -> Any: - try: - function, parsed_arguments = self._get_func_and_args(tool_call) - return function(**parsed_arguments) if parsed_arguments else function() - except Exception as e: # pylint: disable=broad-exception-caught - error_message = f"Error executing function '{tool_call.function.name}': {e}" - logger.error(error_message) - # Return error message as JSON string back to agent in order to make possible self - # correction to the function call - return json.dumps({"error": error_message}) - - -class AsyncFunctionTool(BaseFunctionTool): - - async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: # pylint: disable=invalid-overridden-method - try: - function, parsed_arguments = self._get_func_and_args(tool_call) - if inspect.iscoroutinefunction(function): - return await function(**parsed_arguments) if parsed_arguments else await function() - return function(**parsed_arguments) if parsed_arguments else function() - except Exception as e: # pylint: disable=broad-exception-caught - error_message = f"Error executing function '{tool_call.function.name}': {e}" - 
logger.error(error_message) - # Return error message as JSON string back to agent in order to make possible self correction - # to the function call - return json.dumps({"error": error_message}) - - -class AzureAISearchTool(Tool[AzureAISearchToolDefinition]): - """ - A tool that searches for information using Azure AI Search. - :param connection_id: Connection ID used by tool. All connection tools allow only one connection. - """ - - def __init__( - self, - index_connection_id: str, - index_name: str, - query_type: AzureAISearchQueryType = AzureAISearchQueryType.SIMPLE, - filter: str = "", - top_k: int = 5, - index_asset_id: str = "", - ): - """ - Initialize AzureAISearch with an index_connection_id and index_name, with optional params. - - :param index_connection_id: Index Connection ID used by tool. Allows only one connection. - :type index_connection_id: str - :param index_name: Name of Index in search resource to be used by tool. - :type index_name: str - :param query_type: Type of query in an AIIndexResource attached to this agent. - Default value is AzureAISearchQueryType.SIMPLE. - :type query_type: AzureAISearchQueryType - :param filter: Odata filter string for search resource. - :type filter: str - :param top_k: Number of documents to retrieve from search and present to the model. - :type top_k: int - :param index_asset_id: Index asset ID to be used by tool. - :type filter: str - """ - self.index_list = [ - AISearchIndexResource( - index_connection_id=index_connection_id, - index_name=index_name, - query_type=query_type, - filter=filter, - top_k=top_k, - index_asset_id=index_asset_id, - ) - ] - - @property - def definitions(self) -> List[AzureAISearchToolDefinition]: - """ - Get the Azure AI search tool definitions. - - :return: A list of tool definitions. - :rtype: List[ToolDefinition] - """ - return [AzureAISearchToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the Azure AI search resources. 
- - :return: ToolResources populated with azure_ai_search associated resources. - :rtype: ToolResources - """ - return ToolResources(azure_ai_search=AzureAISearchResource(index_list=self.index_list)) - - def execute(self, tool_call: Any): - """ - AI Search tool does not execute client-side. - - :param Any tool_call: The tool call to execute. - """ - - -class OpenApiTool(Tool[OpenApiToolDefinition]): - """ - A tool that retrieves information using OpenAPI specs. - Initialized with an initial API definition (name, description, spec, auth), - this class also supports adding and removing additional API definitions dynamically. - """ - - def __init__( - self, - name: str, - description: str, - spec: Any, - auth: OpenApiAuthDetails, - default_parameters: Optional[List[str]] = None, - ) -> None: - """ - Constructor initializes the tool with a primary API definition. - - :param name: The name of the API. - :type name: str - :param description: The API description. - :type description: str - :param spec: The API specification. - :type spec: Any - :param auth: Authentication details for the API. - :type auth: OpenApiAuthDetails - :param default_parameters: List of OpenAPI spec parameters that will use user-provided defaults. - :type default_parameters: Optional[List[str]] - """ - default_params: List[str] = [] if default_parameters is None else default_parameters - self._default_auth = auth - self._definitions: List[OpenApiToolDefinition] = [ - OpenApiToolDefinition( - openapi=OpenApiFunctionDefinition( - name=name, description=description, spec=spec, auth=auth, default_params=default_params - ) - ) - ] - - @property - def definitions(self) -> List[OpenApiToolDefinition]: - """ - Get the list of all API definitions for the tool. - - :return: A list of OpenAPI tool definitions. 
- :rtype: List[ToolDefinition] - """ - return self._definitions - - def add_definition( - self, - name: str, - description: str, - spec: Any, - auth: Optional[OpenApiAuthDetails] = None, - default_parameters: Optional[List[str]] = None, - ) -> None: - """ - Adds a new API definition dynamically. - Raises a ValueError if a definition with the same name already exists. - - :param name: The name of the API. - :type name: str - :param description: The description of the API. - :type description: str - :param spec: The API specification. - :type spec: Any - :param auth: Optional authentication details for this particular API definition. - If not provided, the tool's default authentication details will be used. - :type auth: Optional[OpenApiAuthDetails] - :param default_parameters: List of OpenAPI spec parameters that will use user-provided defaults. - :type default_parameters: List[str] - :raises ValueError: If a definition with the same name exists. - """ - default_params: List[str] = [] if default_parameters is None else default_parameters - - # Check if a definition with the same name exists. - if any(definition.openapi.name == name for definition in self._definitions): - raise ValueError(f"Definition '{name}' already exists and cannot be added again.") - - # Use provided auth if specified, otherwise use default - auth_to_use = auth if auth is not None else self._default_auth - - new_definition = OpenApiToolDefinition( - openapi=OpenApiFunctionDefinition( - name=name, description=description, spec=spec, auth=auth_to_use, default_params=default_params - ) - ) - self._definitions.append(new_definition) - - def remove_definition(self, name: str) -> None: - """ - Removes an API definition based on its name. - - :param name: The name of the API definition to remove. - :type name: str - :raises ValueError: If the definition with the specified name does not exist. 
- """ - for definition in self._definitions: - if definition.openapi.name == name: - self._definitions.remove(definition) - logger.info("Definition '%s' removed. Total definitions: %d.", name, len(self._definitions)) - return - raise ValueError(f"Definition with the name '{name}' does not exist.") - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as OpenApiTool doesn't have specific resources. - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> None: - """ - OpenApiTool does not execute client-side. - - :param Any tool_call: The tool call to execute. - :type tool_call: Any - """ - - -class AzureFunctionTool(Tool[AzureFunctionToolDefinition]): - """ - A tool that is used to inform agent about available the Azure function. - - :param name: The azure function name. - :param description: The azure function description. - :param parameters: The description of function parameters. - :param input_queue: Input queue used, by azure function. - :param output_queue: Output queue used, by azure function. - """ - - def __init__( - self, - name: str, - description: str, - parameters: Dict[str, Any], - input_queue: AzureFunctionStorageQueue, - output_queue: AzureFunctionStorageQueue, - ) -> None: - self._definitions = [ - AzureFunctionToolDefinition( - azure_function=AzureFunctionDefinition( - function=FunctionDefinition( - name=name, - description=description, - parameters=parameters, - ), - input_binding=AzureFunctionBinding(storage_queue=input_queue), - output_binding=AzureFunctionBinding(storage_queue=output_queue), - ) - ) - ] - - @property - def definitions(self) -> List[AzureFunctionToolDefinition]: - """ - Get the Azure AI search tool definitions. - - :rtype: List[ToolDefinition] - """ - return self._definitions - - @property - def resources(self) -> ToolResources: - """ - Get the Azure AI search resources. 
- - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> Any: - pass - - -class ConnectionTool(Tool[ToolDefinitionT]): - """ - A tool that requires connection ids. - Used as base class for Bing Grounding, Sharepoint, and Microsoft Fabric - """ - - def __init__(self, connection_id: str): - """ - Initialize ConnectionTool with a connection_id. - - :param connection_id: Connection ID used by tool. All connection tools allow only one connection. - """ - self.connection_ids = [ToolConnection(connection_id=connection_id)] - - @property - def resources(self) -> ToolResources: - """ - Get the connection tool resources. - - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> Any: - pass - - -class BingGroundingTool(Tool[BingGroundingToolDefinition]): - """ - A tool that searches for information using Bing. - """ - - def __init__(self, connection_id: str, market: str = "", set_lang: str = "", count: int = 5, freshness: str = ""): - """ - Initialize Bing Custom Search with a connection_id. - - :param connection_id: Connection ID used by tool. Bing Custom Search tools allow only one connection. - :param market: - :param set_lang: - :param count: - :param freshness: - """ - self.connection_ids = [ - BingGroundingSearchConfiguration( - connection_id=connection_id, market=market, set_lang=set_lang, count=count, freshness=freshness - ) - ] - - @property - def definitions(self) -> List[BingGroundingToolDefinition]: - """ - Get the Bing grounding tool definitions. - - :rtype: List[ToolDefinition] - """ - return [ - BingGroundingToolDefinition( - bing_grounding=BingGroundingSearchConfigurationList(search_configurations=self.connection_ids) - ) - ] - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources. 
- - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> Any: - pass - - -class BingCustomSearchTool(Tool[BingCustomSearchToolDefinition]): - """ - A tool that searches for information using Bing Custom Search. - """ - - def __init__(self, connection_id: str, instance_name: str): - """ - Initialize Bing Custom Search with a connection_id. - - :param connection_id: Connection ID used by tool. Bing Custom Search tools allow only one connection. - :param instance_name: Config instance name used by tool. - """ - self.connection_ids = [BingCustomSearchConfiguration(connection_id=connection_id, instance_name=instance_name)] - - @property - def definitions(self) -> List[BingCustomSearchToolDefinition]: - """ - Get the Bing grounding tool definitions. - - :rtype: List[ToolDefinition] - """ - return [ - BingCustomSearchToolDefinition( - bing_custom_search=BingCustomSearchConfigurationList(search_configurations=self.connection_ids) - ) - ] - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources. - - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> Any: - pass - - -class ConnectedAgentTool(Tool[ConnectedAgentToolDefinition]): - """ - A tool that connects to a sub-agent, with a description describing the conditions - or domain where the sub-agent would be called. - """ - - def __init__(self, id: str, name: str, description: str): - """ - Initialize ConnectedAgentTool with an id, name, and description. - - :param id: The ID of the connected agent. - :param name: The name of the connected agent. - :param description: The description of the connected agent, used by the calling agent - to determine when to call the connected agent. - """ - self.connected_agent = ConnectedAgentDetails(id=id, name=name, description=description) - - @property - def definitions(self) -> List[ConnectedAgentToolDefinition]: - """ - Get the connected agent tool definitions. 
- - :rtype: List[ToolDefinition] - """ - return [ConnectedAgentToolDefinition(connected_agent=self.connected_agent)] - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as ConnectedAgentTool doesn't have specific resources. - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> None: - """ - ConnectedAgentTool does not execute client-side. - - :param Any tool_call: The tool call to execute. - :type tool_call: Any - """ - - -class FabricTool(ConnectionTool[MicrosoftFabricToolDefinition]): - """ - A tool that searches for information using Microsoft Fabric. - """ - - @property - def definitions(self) -> List[MicrosoftFabricToolDefinition]: - """ - Get the Microsoft Fabric tool definitions. - - :rtype: List[ToolDefinition] - """ - return [MicrosoftFabricToolDefinition(fabric_dataagent=ToolConnectionList(connection_list=self.connection_ids))] - - -class SharepointTool(ConnectionTool[SharepointToolDefinition]): - """ - A tool that searches for information using Sharepoint. - """ - - @property - def definitions(self) -> List[SharepointToolDefinition]: - """ - Get the Sharepoint tool definitions. - - :rtype: List[ToolDefinition] - """ - return [SharepointToolDefinition(sharepoint_grounding=ToolConnectionList(connection_list=self.connection_ids))] - - -class FileSearchTool(Tool[FileSearchToolDefinition]): - """ - A tool that searches for uploaded file information from the created vector stores. - - :param vector_store_ids: A list of vector store IDs to search for files. - :type vector_store_ids: list[str] - """ - - def __init__(self, vector_store_ids: Optional[List[str]] = None): - if vector_store_ids is None: - self.vector_store_ids = set() - else: - self.vector_store_ids = set(vector_store_ids) - - def add_vector_store(self, store_id: str) -> None: - """ - Add a vector store ID to the list of vector stores to search for files. 
- - :param store_id: The ID of the vector store to search for files. - :type store_id: str - - """ - self.vector_store_ids.add(store_id) - - def remove_vector_store(self, store_id: str) -> None: - """ - Remove a vector store ID from the list of vector stores to search for files. - - :param store_id: The ID of the vector store to remove. - :type store_id: str - - """ - self.vector_store_ids.remove(store_id) - - @property - def definitions(self) -> List[FileSearchToolDefinition]: - """ - Get the file search tool definitions. - - :rtype: List[ToolDefinition] - """ - return [FileSearchToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the file search resources. - - :rtype: ToolResources - """ - return ToolResources(file_search=FileSearchToolResource(vector_store_ids=list(self.vector_store_ids))) - - def execute(self, tool_call: Any) -> Any: - pass - - -class CodeInterpreterTool(Tool[CodeInterpreterToolDefinition]): - """ - A tool that interprets code files uploaded to the agent. - - :param file_ids: A list of file IDs to interpret. - :type file_ids: list[str] - """ - - def __init__(self, file_ids: Optional[List[str]] = None): - if file_ids is None: - self.file_ids = set() - else: - self.file_ids = set(file_ids) - - def add_file(self, file_id: str) -> None: - """ - Add a file ID to the list of files to interpret. - - :param file_id: The ID of the file to interpret. - :type file_id: str - """ - self.file_ids.add(file_id) - - def remove_file(self, file_id: str) -> None: - """ - Remove a file ID from the list of files to interpret. - - :param file_id: The ID of the file to remove. - :type file_id: str - """ - self.file_ids.remove(file_id) - - @property - def definitions(self) -> List[CodeInterpreterToolDefinition]: - """ - Get the code interpreter tool definitions. 
- - :rtype: List[ToolDefinition] - """ - return [CodeInterpreterToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the code interpreter resources. - - :rtype: ToolResources - """ - if not self.file_ids: - return ToolResources() - return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=list(self.file_ids))) - - def execute(self, tool_call: Any) -> Any: - pass - - -class BaseToolSet: - """ - Abstract class for a collection of tools that can be used by an agent. - """ - - def __init__(self) -> None: - self._tools: List[Tool] = [] - - def validate_tool_type(self, tool: Tool) -> None: - pass - - def add(self, tool: Tool): - """ - Add a tool to the tool set. - - :param Tool tool: The tool to add. - :raises ValueError: If a tool of the same type already exists. - """ - self.validate_tool_type(tool) - - if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools): - raise ValueError("Tool of type {type(tool).__name__} already exists in the ToolSet.") - self._tools.append(tool) - - def remove(self, tool_type: Type[Tool]) -> None: - """ - Remove a tool of the specified type from the tool set. - - :param Type[Tool] tool_type: The type of tool to remove. - :raises ValueError: If a tool of the specified type is not found. - """ - for i, tool in enumerate(self._tools): - if isinstance(tool, tool_type): - del self._tools[i] - logger.info("Tool of type %s removed from the ToolSet.", tool_type.__name__) - return - raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the definitions for all tools in the tool set. - - :rtype: List[ToolDefinition] - """ - tools = [] - for tool in self._tools: - tools.extend(tool.definitions) - return tools - - @property - def resources(self) -> ToolResources: - """ - Get the resources for all tools in the tool set. 
- - :rtype: ToolResources - """ - tool_resources: Dict[str, Any] = {} - for tool in self._tools: - resources = tool.resources - for key, value in resources.items(): - if key in tool_resources: - if isinstance(tool_resources[key], dict) and isinstance(value, dict): - tool_resources[key].update(value) - else: - tool_resources[key] = value - return self._create_tool_resources_from_dict(tool_resources) - - def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources: - """ - Safely converts a dictionary into a ToolResources instance. - - :param resources: A dictionary of tool resources. Should be a mapping - accepted by ~azure.ai.agents.models.AzureAISearchResource - :type resources: Dict[str, Any] - :return: A ToolResources instance. - :rtype: ToolResources - """ - try: - return ToolResources(**resources) - except TypeError as e: - logger.error("Error creating ToolResources: %s", e) - raise ValueError("Invalid resources for ToolResources.") from e - - def get_definitions_and_resources(self) -> Dict[str, Any]: - """ - Get the definitions and resources for all tools in the tool set. - - :return: A dictionary containing the tool resources and definitions. - :rtype: Dict[str, Any] - """ - return { - "tool_resources": self.resources, - "tools": self.definitions, - } - - def get_tool(self, tool_type: Type[ToolT]) -> ToolT: - """ - Get a tool of the specified type from the tool set. - - :param Type[Tool] tool_type: The type of tool to get. - :return: The tool of the specified type. - :rtype: Tool - :raises ValueError: If a tool of the specified type is not found. - """ - for tool in self._tools: - if isinstance(tool, tool_type): - return cast(ToolT, tool) - raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") - - -class ToolSet(BaseToolSet): - """ - A collection of tools that can be used by an synchronize agent. - """ - - def validate_tool_type(self, tool: Tool) -> None: - """ - Validate the type of the tool. 
- - :param Tool tool: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool, AsyncFunctionTool): - raise ValueError( - "AsyncFunctionTool is not supported in ToolSet. " - + "To use async functions, use AsyncToolSet and agents operations in azure.ai.agents.aio." - ) - - def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param List[Any] tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. - :rtype: Any - """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(FunctionTool) - output = tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": str(output), - } - tool_outputs.append(tool_output) - except Exception as e: # pylint: disable=broad-exception-caught - tool_output = {"tool_call_id": tool_call.id, "output": str(e)} - tool_outputs.append(tool_output) - - return tool_outputs - - -class AsyncToolSet(BaseToolSet): - """ - A collection of tools that can be used by an asynchronous agent. - """ - - def validate_tool_type(self, tool: Tool) -> None: - """ - Validate the type of the tool. - - :param Tool tool: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool, FunctionTool): - raise ValueError( - "FunctionTool is not supported in AsyncToolSet. " - + "Please use AsyncFunctionTool instead and provide sync and/or async function(s)." - ) - - async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param List[Any] tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. 
- :rtype: Any - """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(AsyncFunctionTool) - output = await tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": str(output), - } - tool_outputs.append(tool_output) - except Exception as e: # pylint: disable=broad-exception-caught - tool_output = {"tool_call_id": tool_call.id, "output": str(e)} - tool_outputs.append(tool_output) - - return tool_outputs - - -EventFunctionReturnT = TypeVar("EventFunctionReturnT") -T = TypeVar("T") -BaseAsyncAgentEventHandlerT = TypeVar("BaseAsyncAgentEventHandlerT", bound="BaseAsyncAgentEventHandler") -BaseAgentEventHandlerT = TypeVar("BaseAgentEventHandlerT", bound="BaseAgentEventHandler") - - -async def async_chain(*iterators: AsyncIterator[T]) -> AsyncIterator[T]: - for iterator in iterators: - async for item in iterator: - yield item - - -class BaseAsyncAgentEventHandler(AsyncIterator[T]): - - def __init__(self) -> None: - self.response_iterator: Optional[AsyncIterator[bytes]] = None - self.submit_tool_outputs: Optional[ - Callable[[ThreadRun, "BaseAsyncAgentEventHandler[T]", bool], Awaitable[Any]] - ] = None - self.buffer: Optional[bytes] = None - - def initialize( - self, - response_iterator: AsyncIterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, "BaseAsyncAgentEventHandler[T]", bool], Awaitable[Any]], - ): - self.response_iterator = ( - async_chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator - ) - self.submit_tool_outputs = submit_tool_outputs - - # cspell:disable-next-line - async def __anext__(self) -> T: - # cspell:disable-next-line - event_bytes = await self.__anext_impl__() - return await self._process_event(event_bytes.decode("utf-8")) - - # cspell:disable-next-line - async def __anext_impl__(self) -> bytes: - self.buffer = b"" if self.buffer is None else self.buffer - if self.response_iterator is None: - raise 
ValueError("The response handler was not initialized.") - - if not b"\n\n" in self.buffer: - async for chunk in self.response_iterator: - self.buffer += chunk - if b"\n\n" in self.buffer: - break - - if self.buffer == b"": - raise StopAsyncIteration() - - event_bytes = b"" - if b"\n\n" in self.buffer: - event_end_index = self.buffer.index(b"\n\n") - event_bytes = self.buffer[:event_end_index] - self.buffer = self.buffer[event_end_index:].lstrip() - else: - event_bytes = self.buffer - self.buffer = b"" - - return event_bytes - - async def _process_event(self, event_data_str: str) -> T: - raise NotImplementedError("This method needs to be implemented.") - - async def until_done(self) -> None: - """ - Iterates through all events until the stream is marked as done. - Calls the provided callback function with each event data. - """ - try: - async for _ in self: - pass - except StopAsyncIteration: - pass - - -class BaseAgentEventHandler(Iterator[T]): - - def __init__(self) -> None: - self.response_iterator: Optional[Iterator[bytes]] = None - self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAgentEventHandler[T]", bool], Any]] - self.buffer: Optional[bytes] = None - - def initialize( - self, - response_iterator: Iterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, "BaseAgentEventHandler[T]", bool], Any], - ) -> None: - self.response_iterator = ( - itertools.chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator - ) - self.submit_tool_outputs = submit_tool_outputs - - def __next__(self) -> T: - event_bytes = self.__next_impl__() - return self._process_event(event_bytes.decode("utf-8")) - - def __next_impl__(self) -> bytes: - self.buffer = b"" if self.buffer is None else self.buffer - if self.response_iterator is None: - raise ValueError("The response handler was not initialized.") - - if not b"\n\n" in self.buffer: - for chunk in self.response_iterator: - self.buffer += chunk - if b"\n\n" in self.buffer: - break 
- - if self.buffer == b"": - raise StopIteration() - - event_bytes = b"" - if b"\n\n" in self.buffer: - event_end_index = self.buffer.index(b"\n\n") - event_bytes = self.buffer[:event_end_index] - self.buffer = self.buffer[event_end_index:].lstrip() - else: - event_bytes = self.buffer - self.buffer = b"" - - return event_bytes - - def _process_event(self, event_data_str: str) -> T: - raise NotImplementedError("This method needs to be implemented.") - - def until_done(self) -> None: - """ - Iterates through all events until the stream is marked as done. - Calls the provided callback function with each event data. - """ - try: - for _ in self: - pass - except StopIteration: - pass - - -class AsyncAgentEventHandler(BaseAsyncAgentEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): - def __init__(self) -> None: - super().__init__() - self._max_retry = 10 - self.current_retry = 0 - - def set_max_retry(self, max_retry: int) -> None: - """ - Set the maximum number of retries for tool output submission. - - :param int max_retry: The maximum number of retries. 
- """ - self._max_retry = max_retry - - async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: - - event_type, event_data_obj = _parse_event(event_data_str) - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - tool_output = await cast( - Callable[[ThreadRun, "BaseAsyncAgentEventHandler", bool], Awaitable[Any]], self.submit_tool_outputs - )(event_data_obj, self, self.current_retry < self._max_retry) - - if _has_errors_in_toolcalls_output(tool_output): - self.current_retry += 1 - - func_rt: Optional[EventFunctionReturnT] = None - try: - if isinstance(event_data_obj, MessageDeltaChunk): - func_rt = await self.on_message_delta(event_data_obj) - elif isinstance(event_data_obj, ThreadMessage): - func_rt = await self.on_thread_message(event_data_obj) - elif isinstance(event_data_obj, ThreadRun): - func_rt = await self.on_thread_run(event_data_obj) - elif isinstance(event_data_obj, RunStep): - func_rt = await self.on_run_step(event_data_obj) - elif isinstance(event_data_obj, RunStepDeltaChunk): - func_rt = await self.on_run_step_delta(event_data_obj) - elif event_type == AgentStreamEvent.ERROR: - func_rt = await self.on_error(event_data_obj) - elif event_type == AgentStreamEvent.DONE: - func_rt = await self.on_done() - else: - func_rt = await self.on_unhandled_event( - event_type, event_data_obj - ) # pylint: disable=assignment-from-none - except Exception as e: # pylint: disable=broad-exception-caught - logger.error("Error in event handler for event '%s': %s", event_type, e) - return event_type, event_data_obj, func_rt - - async def on_message_delta( - self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle message delta events. - - :param MessageDeltaChunk delta: The message delta. 
- :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_thread_message( - self, message: "ThreadMessage" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle thread message events. - - :param ThreadMessage message: The thread message. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_thread_run( - self, run: "ThreadRun" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle thread run events. - - :param ThreadRun run: The thread run. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle run step events. - - :param RunStep step: The run step. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_run_step_delta( - self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle run step delta events. - - :param RunStepDeltaChunk delta: The run step delta. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle error events. - - :param str data: The error event's data. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_done( - self, - ) -> Optional[EventFunctionReturnT]: - """Handle the completion of the stream. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_unhandled_event( - self, event_type: str, event_data: str # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle any unhandled event types. - - :param str event_type: The event type. - :param Any event_data: The event's data. 
- :rtype: Optional[EventFunctionReturnT] - """ - return None - - -class AgentEventHandler(BaseAgentEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): - def __init__(self) -> None: - super().__init__() - self._max_retry = 10 - self.current_retry = 0 - - def set_max_retry(self, max_retry: int) -> None: - """ - Set the maximum number of retries for tool output submission. - - :param int max_retry: The maximum number of retries. - """ - self._max_retry = max_retry - - def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: - - event_type, event_data_obj = _parse_event(event_data_str) - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - tool_output = cast(Callable[[ThreadRun, "BaseAgentEventHandler", bool], Any], self.submit_tool_outputs)( - event_data_obj, self, self.current_retry < self._max_retry - ) - - if _has_errors_in_toolcalls_output(tool_output): - self.current_retry += 1 - - func_rt: Optional[EventFunctionReturnT] = None - try: - if isinstance(event_data_obj, MessageDeltaChunk): - func_rt = self.on_message_delta(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, ThreadMessage): - func_rt = self.on_thread_message(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, ThreadRun): - func_rt = self.on_thread_run(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, RunStep): - func_rt = self.on_run_step(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, RunStepDeltaChunk): - func_rt = self.on_run_step_delta(event_data_obj) # pylint: disable=assignment-from-none - elif event_type == AgentStreamEvent.ERROR: - func_rt = self.on_error(event_data_obj) # pylint: disable=assignment-from-none - elif event_type == 
AgentStreamEvent.DONE: - func_rt = self.on_done() # pylint: disable=assignment-from-none - else: - func_rt = self.on_unhandled_event(event_type, event_data_obj) # pylint: disable=assignment-from-none - except Exception as e: # pylint: disable=broad-exception-caught - logger.error("Error in event handler for event '%s': %s", event_type, e) - return event_type, event_data_obj, func_rt - - def on_message_delta( - self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle message delta events. - - :param MessageDeltaChunk delta: The message delta. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_thread_message( - self, message: "ThreadMessage" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle thread message events. - - :param ThreadMessage message: The thread message. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_thread_run(self, run: "ThreadRun") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle thread run events. - - :param ThreadRun run: The thread run. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle run step events. - - :param RunStep step: The run step. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_run_step_delta( - self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle run step delta events. - - :param RunStepDeltaChunk delta: The run step delta. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle error events. - - :param str data: The error event's data. 
- :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_done( - self, - ) -> Optional[EventFunctionReturnT]: - """Handle the completion of the stream.""" - return None - - def on_unhandled_event( - self, event_type: str, event_data: str # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle any unhandled event types. - - :param str event_type: The event type. - :param Any event_data: The event's data. - """ - return None - - -class AsyncAgentRunStream(Generic[BaseAsyncAgentEventHandlerT]): - def __init__( - self, - response_iterator: AsyncIterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, BaseAsyncAgentEventHandlerT, bool], Awaitable[Any]], - event_handler: BaseAsyncAgentEventHandlerT, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.submit_tool_outputs = submit_tool_outputs - self.event_handler.initialize( - self.response_iterator, - cast(Callable[[ThreadRun, BaseAsyncAgentEventHandler, bool], Awaitable[Any]], submit_tool_outputs), - ) - - async def __aenter__(self): - return self.event_handler - - async def __aexit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - result = close_method() - if asyncio.iscoroutine(result): - await result - - -class AgentRunStream(Generic[BaseAgentEventHandlerT]): - def __init__( - self, - response_iterator: Iterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, BaseAgentEventHandlerT, bool], Any], - event_handler: BaseAgentEventHandlerT, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.submit_tool_outputs = submit_tool_outputs - self.event_handler.initialize( - self.response_iterator, - cast(Callable[[ThreadRun, BaseAgentEventHandler, bool], Any], submit_tool_outputs), - ) - - def __enter__(self): - return self.event_handler - - def __exit__(self, exc_type, exc_val, exc_tb): - close_method = 
getattr(self.response_iterator, "close", None) - if callable(close_method): - close_method() - - -__all__: List[str] = [ - "AgentEventHandler", - "AgentRunStream", - "AsyncAgentRunStream", - "AsyncFunctionTool", - "AsyncToolSet", - "AzureAISearchTool", - "AzureFunctionTool", - "BaseAsyncAgentEventHandler", - "BaseAgentEventHandler", - "CodeInterpreterTool", - "ConnectedAgentTool", - "AsyncAgentEventHandler", - "FileSearchTool", - "FunctionTool", - "OpenApiTool", - "BingCustomSearchTool", - "BingGroundingTool", - "StreamEventData", - "SharepointTool", - "FabricTool", - "AzureAISearchTool", - "Tool", - "ToolSet", - "BaseAsyncAgentEventHandlerT", - "BaseAgentEventHandlerT", - "ThreadMessage", - "MessageTextFileCitationAnnotation", - "MessageDeltaChunk", - "MessageAttachment", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py b/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py index 6792ffa03963..56061e6d04b4 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression,too-many-lines +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -29,9 +29,10 @@ from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict -from .. import models as _models +from .. 
import models as _models1 +from .....servicepatterns import models as _servicepatterns_models4 from .._configuration import AgentsClientConfiguration -from .._utils.model_base import Model as _Model, SdkJSONEncoder, _deserialize +from .._utils.model_base import Model as _Model, SdkJSONEncoder, _deserialize, _failsafe_deserialize from .._utils.serialization import Deserializer, Serializer from .._utils.utils import ClientMixinABC, prepare_multipart_form_data @@ -71,7 +72,7 @@ def build_threads_create_request(**kwargs: Any) -> HttpRequest: def build_threads_list_request( *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -209,7 +210,7 @@ def build_messages_list_request( *, run_id: Optional[str] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -301,7 +302,7 @@ def build_messages_update_request(thread_id: str, message_id: str, **kwargs: Any def build_runs_create_request( - thread_id: str, *, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, **kwargs: Any + thread_id: str, *, include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -335,7 +336,7 @@ def build_runs_list_request( thread_id: str, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -482,7 +483,7 @@ def build_run_steps_get_request( run_id: str, step_id: str, *,
include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -516,9 +517,9 @@ def build_run_steps_list_request( thread_id: str, run_id: str, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -558,7 +559,7 @@ def build_run_steps_list_request( def build_files_list_request( - *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + *, purpose: Optional[Union[str, _models1.FilePurpose]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -674,7 +675,7 @@ def build_files_get_file_content_request(file_id: str, **kwargs: Any) -> HttpReq def build_vector_stores_list_request( *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -805,9 +806,9 @@ def build_vector_stores_delete_request(vector_store_id: str, **kwargs: Any) -> H def build_vector_store_files_list_request( vector_store_id: str, *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + filter: Optional[Union[str, _models1.VectorStoreFileStatusFilter]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: 
Optional[str] = None, **kwargs: Any @@ -1009,9 +1010,9 @@ def build_vector_store_file_batches_list_files_request( # pylint: disable=name- vector_store_id: str, batch_id: str, *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + filter: Optional[Union[str, _models1.VectorStoreFileStatusFilter]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -1019,6 +1020,7 @@ def build_vector_store_file_batches_list_files_request( # pylint: disable=name- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1033,6 +1035,7 @@ def build_vector_store_file_batches_list_files_request( # pylint: disable=name- # Construct parameters if filter is not None: _params["filter"] = _SERIALIZER.query("filter", filter, "str") + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") if limit is not None: _params["limit"] = _SERIALIZER.query("limit", limit, "int") if order is not None: @@ -1073,7 +1076,7 @@ def build_agents_create_agent_request(**kwargs: Any) -> HttpRequest: def build_agents_list_agents_request( *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any @@ -1211,7 +1214,7 @@ class ThreadsOperations: :attr:`threads` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -1223,11 +1226,11 @@ def create( self, *, content_type: str = "application/json", - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, + messages: Optional[List[_models1.ThreadMessageOptions]] = None, + tool_resources: Optional[_models1.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models1.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -1253,7 +1256,7 @@ def create( """ @overload - def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.AgentThread: + def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models1.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :param body: Required. @@ -1267,7 +1270,7 @@ def create(self, body: JSON, *, content_type: str = "application/json", **kwargs """ @overload - def create(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.AgentThread: + def create(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models1.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :param body: Required. 
@@ -1285,11 +1288,11 @@ def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, + messages: Optional[List[_models1.ThreadMessageOptions]] = None, + tool_resources: Optional[_models1.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models1.AgentThread: """Creates a new thread. Threads contain messages and can be run by agents. :param body: Is either a JSON type or a IO[bytes] type. Required. @@ -1324,7 +1327,7 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models1.AgentThread] = kwargs.pop("cls", None) if body is _Unset: body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} @@ -1362,12 +1365,13 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models1.AgentThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1379,10 +1383,10 @@ def list( self, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.AgentThread"]: + ) -> Iterable["_models1.AgentThread"]: """Gets a list of threads that were previously created. 
:keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and @@ -1403,7 +1407,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.AgentThread]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.AgentThread]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -1432,7 +1436,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.AgentThread], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.AgentThread], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -1448,14 +1452,15 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return ItemPaged(get_next, extract_data) @distributed_trace - def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + def get(self, thread_id: str, **kwargs: Any) -> _models1.AgentThread: """Gets information about an existing thread. :param thread_id: Identifier of the thread. Required. 
@@ -1475,7 +1480,7 @@ def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models1.AgentThread] = kwargs.pop("cls", None) _request = build_threads_get_request( thread_id=thread_id, @@ -1502,12 +1507,13 @@ def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models1.AgentThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1520,10 +1526,10 @@ def update( thread_id: str, *, content_type: str = "application/json", - tool_resources: Optional[_models.ToolResources] = None, + tool_resources: Optional[_models1.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models1.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -1550,7 +1556,7 @@ def update( @overload def update( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models1.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -1568,7 +1574,7 @@ def update( @overload def update( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models1.AgentThread: """Modifies an existing thread. 
:param thread_id: The ID of the thread to modify. Required. @@ -1589,10 +1595,10 @@ def update( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - tool_resources: Optional[_models.ToolResources] = None, + tool_resources: Optional[_models1.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models1.AgentThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -1626,7 +1632,7 @@ def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models1.AgentThread] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata, "tool_resources": tool_resources} @@ -1665,12 +1671,13 @@ def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models1.AgentThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1678,7 +1685,7 @@ def update( return deserialized # type: ignore @distributed_trace - def delete(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + def delete(self, thread_id: str, **kwargs: Any) -> _models1.ThreadDeletionStatus: """Deletes an existing thread. :param thread_id: Identifier of the thread. Required. 
@@ -1698,7 +1705,7 @@ def delete(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadDeletionStatus] = kwargs.pop("cls", None) _request = build_threads_delete_request( thread_id=thread_id, @@ -1725,12 +1732,13 @@ def delete(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + deserialized = _deserialize(_models1.ThreadDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1748,7 +1756,7 @@ class MessagesOperations: :attr:`messages` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -1760,13 +1768,13 @@ def create( self, thread_id: str, *, - role: Union[str, _models.MessageRole], + role: Union[str, _models1.MessageRole], content: "_types.MessageInputContent", content_type: str = "application/json", - attachments: Optional[List[_models.MessageAttachment]] = None, + attachments: Optional[List[_models1.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. @@ -1801,7 +1809,7 @@ def create( @overload def create( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. @@ -1819,7 +1827,7 @@ def create( @overload def create( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. 
@@ -1840,12 +1848,12 @@ def create( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - role: Union[str, _models.MessageRole] = _Unset, + role: Union[str, _models1.MessageRole] = _Unset, content: "_types.MessageInputContent" = _Unset, - attachments: Optional[List[_models.MessageAttachment]] = None, + attachments: Optional[List[_models1.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Creates a new message on a specified thread. :param thread_id: Identifier of the thread. Required. @@ -1887,7 +1895,7 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadMessage] = kwargs.pop("cls", None) if body is _Unset: if role is _Unset: @@ -1930,12 +1938,13 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) + deserialized = _deserialize(_models1.ThreadMessage, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1949,10 +1958,10 @@ def list( *, run_id: Optional[str] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.ThreadMessage"]: + ) -> Iterable["_models1.ThreadMessage"]: """Gets a list of messages that exist on a thread. 
:param thread_id: Identifier of the thread. Required. @@ -1977,7 +1986,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.ThreadMessage]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.ThreadMessage]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2008,7 +2017,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.ThreadMessage], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.ThreadMessage], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -2031,7 +2040,7 @@ def get_next(_continuation_token=None): return ItemPaged(get_next, extract_data) @distributed_trace - def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models1.ThreadMessage: """Retrieves an existing message. :param thread_id: Identifier of the thread. Required. 
@@ -2053,7 +2062,7 @@ def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadM _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadMessage] = kwargs.pop("cls", None) _request = build_messages_get_request( thread_id=thread_id, @@ -2081,12 +2090,13 @@ def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadM except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) + deserialized = _deserialize(_models1.ThreadMessage, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2102,7 +2112,7 @@ def update( content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. @@ -2125,7 +2135,7 @@ def update( @overload def update( self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. @@ -2145,7 +2155,7 @@ def update( @overload def update( self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Modifies an existing message on an existing thread. 
:param thread_id: Identifier of the thread. Required. @@ -2171,7 +2181,7 @@ def update( *, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadMessage: + ) -> _models1.ThreadMessage: """Modifies an existing message on an existing thread. :param thread_id: Identifier of the thread. Required. @@ -2201,7 +2211,7 @@ def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadMessage] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata} @@ -2241,12 +2251,13 @@ def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) + deserialized = _deserialize(_models1.ThreadMessage, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2264,7 +2275,7 @@ class RunsOperations: :attr:`runs` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -2277,25 +2288,25 @@ def create( thread_id: str, *, agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, content_type: str = "application/json", model: Optional[str] = None, instructions: Optional[str] = None, additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, + additional_messages: Optional[List[_models1.ThreadMessageOptions]] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models1.TruncationObject] = None, tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. 
@@ -2390,10 +2401,10 @@ def create( thread_id: str, body: JSON, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. @@ -2419,10 +2430,10 @@ def create( thread_id: str, body: IO[bytes], *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. @@ -2449,24 +2460,24 @@ def create( body: Union[JSON, IO[bytes]] = _Unset, *, agent_id: str = _Unset, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, model: Optional[str] = None, instructions: Optional[str] = None, additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, + additional_messages: Optional[List[_models1.ThreadMessageOptions]] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models1.TruncationObject] = None, tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = 
None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Identifier of the thread. Required. @@ -2565,7 +2576,7 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: if agent_id is _Unset: @@ -2624,12 +2635,13 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models1.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2642,10 +2654,10 @@ def list( thread_id: str, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.ThreadRun"]: + ) -> Iterable["_models1.ThreadRun"]: """Gets a list of runs for a specified thread. :param thread_id: Identifier of the thread. Required. 
@@ -2668,7 +2680,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.ThreadRun]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.ThreadRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2698,7 +2710,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.ThreadRun], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.ThreadRun], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -2714,14 +2726,15 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return ItemPaged(get_next, extract_data) @distributed_trace - def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models1.ThreadRun: """Gets an existing run from an existing thread. :param thread_id: Identifier of the thread. Required. 
@@ -2743,7 +2756,7 @@ def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadRun] = kwargs.pop("cls", None) _request = build_runs_get_request( thread_id=thread_id, @@ -2771,12 +2784,13 @@ def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models1.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2792,7 +2806,7 @@ def update( content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. @@ -2815,7 +2829,7 @@ def update( @overload def update( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. @@ -2835,7 +2849,7 @@ def update( @overload def update( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. 
@@ -2861,7 +2875,7 @@ def update( *, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Modifies an existing thread run. :param thread_id: Identifier of the thread. Required. @@ -2891,7 +2905,7 @@ def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata} @@ -2931,12 +2945,13 @@ def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models1.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2949,11 +2964,11 @@ def submit_tool_outputs( thread_id: str, run_id: str, *, - tool_outputs: List[_models.ToolOutput], + tool_outputs: List[_models1.ToolOutput], content_type: str = "application/json", stream_parameter: Optional[bool] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. @@ -2976,7 +2991,7 @@ def submit_tool_outputs( @overload def submit_tool_outputs( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. 
@@ -2996,7 +3011,7 @@ def submit_tool_outputs( @overload def submit_tool_outputs( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. @@ -3020,10 +3035,10 @@ def submit_tool_outputs( run_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - tool_outputs: List[_models.ToolOutput] = _Unset, + tool_outputs: List[_models1.ToolOutput] = _Unset, stream_parameter: Optional[bool] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. :param thread_id: Identifier of the thread. Required. @@ -3053,7 +3068,7 @@ def submit_tool_outputs( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: if tool_outputs is _Unset: @@ -3095,12 +3110,13 @@ def submit_tool_outputs( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models1.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3108,7 +3124,7 @@ def submit_tool_outputs( return deserialized # type: ignore @distributed_trace - def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + def cancel(self, 
thread_id: str, run_id: str, **kwargs: Any) -> _models1.ThreadRun: """Cancels a run of an in-progress thread. :param thread_id: Identifier of the thread. Required. @@ -3130,7 +3146,7 @@ def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRu _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadRun] = kwargs.pop("cls", None) _request = build_runs_cancel_request( thread_id=thread_id, @@ -3158,12 +3174,13 @@ def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRu except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models1.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3181,7 +3198,7 @@ class RunStepsOperations: :attr:`run_steps` attribute. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -3195,9 +3212,9 @@ def get( run_id: str, step_id: str, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, **kwargs: Any - ) -> _models.RunStep: + ) -> _models1.RunStep: """Retrieves a single run step from a thread run. :param thread_id: Identifier of the thread. Required. 
@@ -3226,7 +3243,7 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + cls: ClsType[_models1.RunStep] = kwargs.pop("cls", None) _request = build_run_steps_get_request( thread_id=thread_id, @@ -3256,12 +3273,13 @@ def get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.RunStep, response.json()) + deserialized = _deserialize(_models1.RunStep, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3274,12 +3292,12 @@ def list( thread_id: str, run_id: str, *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + include: Optional[List[Union[str, _models1.RunAdditionalFieldList]]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.RunStep"]: + ) -> Iterable["_models1.RunStep"]: """Gets a list of run steps from a thread run. :param thread_id: Identifier of the thread. Required. 
@@ -3309,7 +3327,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.RunStep]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.RunStep]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3341,7 +3359,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.RunStep], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.RunStep], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -3357,7 +3375,8 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3374,7 +3393,7 @@ class FilesOperations: :attr:`files` attribute. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -3383,8 +3402,8 @@ def __init__(self, *args, **kwargs): @distributed_trace def list( - self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any - ) -> _models.FileListResponse: + self, *, purpose: Optional[Union[str, _models1.FilePurpose]] = None, **kwargs: Any + ) -> _models1.FileListResponse: """Gets a list of previously uploaded files. :keyword purpose: The purpose of the file. 
Known values are: "fine-tune", "fine-tune-results", @@ -3406,7 +3425,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + cls: ClsType[_models1.FileListResponse] = kwargs.pop("cls", None) _request = build_files_list_request( purpose=purpose, @@ -3433,12 +3452,13 @@ def list( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileListResponse, response.json()) + deserialized = _deserialize(_models1.FileListResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3446,12 +3466,12 @@ def list( return deserialized # type: ignore @overload - def _upload_file(self, body: _models._models.UploadFileRequest, **kwargs: Any) -> _models.FileInfo: ... + def _upload_file(self, body: _models1._models.UploadFileRequest, **kwargs: Any) -> _models1.FileInfo: ... @overload - def _upload_file(self, body: JSON, **kwargs: Any) -> _models.FileInfo: ... + def _upload_file(self, body: JSON, **kwargs: Any) -> _models1.FileInfo: ... @distributed_trace - def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **kwargs: Any) -> _models.FileInfo: + def _upload_file(self, body: Union[_models1._models.UploadFileRequest, JSON], **kwargs: Any) -> _models1.FileInfo: """Uploads a file for use by other operations. :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. 
@@ -3471,7 +3491,7 @@ def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **k _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileInfo] = kwargs.pop("cls", None) + cls: ClsType[_models1.FileInfo] = kwargs.pop("cls", None) _body = body.as_dict() if isinstance(body, _Model) else body _file_fields: List[str] = ["file"] @@ -3504,12 +3524,13 @@ def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **k except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileInfo, response.json()) + deserialized = _deserialize(_models1.FileInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3517,7 +3538,7 @@ def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **k return deserialized # type: ignore @distributed_trace - def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + def delete(self, file_id: str, **kwargs: Any) -> _models1.FileDeletionStatus: """Delete a previously uploaded file. :param file_id: The ID of the file to delete. Required. 
@@ -3537,7 +3558,7 @@ def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models1.FileDeletionStatus] = kwargs.pop("cls", None) _request = build_files_delete_request( file_id=file_id, @@ -3564,12 +3585,13 @@ def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + deserialized = _deserialize(_models1.FileDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3577,7 +3599,7 @@ def delete(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: return deserialized # type: ignore @distributed_trace - def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: + def get(self, file_id: str, **kwargs: Any) -> _models1.FileInfo: """Returns information about a specific file. Does not retrieve file content. :param file_id: The ID of the file to retrieve. Required. 
@@ -3597,7 +3619,7 @@ def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileInfo] = kwargs.pop("cls", None) + cls: ClsType[_models1.FileInfo] = kwargs.pop("cls", None) _request = build_files_get_request( file_id=file_id, @@ -3624,12 +3646,13 @@ def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileInfo, response.json()) + deserialized = _deserialize(_models1.FileInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3684,7 +3707,8 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) deserialized = response.iter_bytes() @@ -3704,7 +3728,7 @@ class VectorStoresOperations: :attr:`vector_stores` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -3716,10 +3740,10 @@ def list( self, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.VectorStore"]: + ) -> Iterable["_models1.VectorStore"]: """Returns a list of vector stores. :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and @@ -3740,7 +3764,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.VectorStore]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.VectorStore]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3769,7 +3793,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.VectorStore], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.VectorStore], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -3785,7 +3809,8 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3798,12 +3823,12 @@ def create( content_type: str = "application/json", 
file_ids: Optional[List[str]] = None, name: Optional[str] = None, - store_configuration: Optional[_models.VectorStoreConfiguration] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + store_configuration: Optional[_models1.VectorStoreConfiguration] = None, + expires_after: Optional[_models1.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models1.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models1.VectorStore: """Creates a vector store. :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -3833,7 +3858,7 @@ def create( """ @overload - def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.VectorStore: + def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models1.VectorStore: """Creates a vector store. :param body: Required. @@ -3847,7 +3872,7 @@ def create(self, body: JSON, *, content_type: str = "application/json", **kwargs """ @overload - def create(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.VectorStore: + def create(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models1.VectorStore: """Creates a vector store. :param body: Required. 
@@ -3867,12 +3892,12 @@ def create( *, file_ids: Optional[List[str]] = None, name: Optional[str] = None, - store_configuration: Optional[_models.VectorStoreConfiguration] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + store_configuration: Optional[_models1.VectorStoreConfiguration] = None, + expires_after: Optional[_models1.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models1.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models1.VectorStore: """Creates a vector store. :param body: Is either a JSON type or a IO[bytes] type. Required. @@ -3911,7 +3936,7 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStore] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -3956,12 +3981,13 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStore, response.json()) + deserialized = _deserialize(_models1.VectorStore, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3969,7 +3995,7 @@ def create( return deserialized # type: ignore @distributed_trace - def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + def get(self, vector_store_id: str, **kwargs: Any) -> _models1.VectorStore: """Returns the vector store object 
matching the specified ID. :param vector_store_id: Identifier of the vector store. Required. @@ -3989,7 +4015,7 @@ def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStore] = kwargs.pop("cls", None) _request = build_vector_stores_get_request( vector_store_id=vector_store_id, @@ -4016,12 +4042,13 @@ def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStore, response.json()) + deserialized = _deserialize(_models1.VectorStore, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4035,10 +4062,10 @@ def modify( *, content_type: str = "application/json", name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + expires_after: Optional[_models1.VectorStoreExpirationPolicy] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models1.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -4063,7 +4090,7 @@ def modify( @overload def modify( self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: + ) -> _models1.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. 
@@ -4081,7 +4108,7 @@ def modify( @overload def modify( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: + ) -> _models1.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -4103,10 +4130,10 @@ def modify( body: Union[JSON, IO[bytes]] = _Unset, *, name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + expires_after: Optional[_models1.VectorStoreExpirationPolicy] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.VectorStore: + ) -> _models1.VectorStore: """Modifies an existing vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -4138,7 +4165,7 @@ def modify( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStore] = kwargs.pop("cls", None) if body is _Unset: body = {"expires_after": expires_after, "metadata": metadata, "name": name} @@ -4177,12 +4204,13 @@ def modify( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStore, response.json()) + deserialized = _deserialize(_models1.VectorStore, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4190,7 +4218,7 @@ def modify( return deserialized # type: ignore @distributed_trace - def delete(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + def 
delete(self, vector_store_id: str, **kwargs: Any) -> _models1.VectorStoreDeletionStatus: """Deletes the vector store object matching the specified ID. :param vector_store_id: Identifier of the vector store. Required. @@ -4211,7 +4239,7 @@ def delete(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDele _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreDeletionStatus] = kwargs.pop("cls", None) _request = build_vector_stores_delete_request( vector_store_id=vector_store_id, @@ -4238,12 +4266,13 @@ def delete(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDele except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + deserialized = _deserialize(_models1.VectorStoreDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4261,7 +4290,7 @@ class VectorStoreFilesOperations: :attr:`vector_store_files` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -4273,12 +4302,12 @@ def list( self, vector_store_id: str, *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + filter: Optional[Union[str, _models1.VectorStoreFileStatusFilter]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.VectorStoreFile"]: + ) -> Iterable["_models1.VectorStoreFile"]: """Returns a list of vector store files. :param vector_store_id: Identifier of the vector store. Required. @@ -4304,7 +4333,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.VectorStoreFile]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.VectorStoreFile]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4335,7 +4364,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.VectorStoreFile], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.VectorStoreFile], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -4351,7 +4380,8 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, 
response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4364,10 +4394,10 @@ def create( *, content_type: str = "application/json", file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_source: Optional[_models1.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models1.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models1.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -4390,7 +4420,7 @@ def create( @overload def create( self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models1.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -4408,7 +4438,7 @@ def create( @overload def create( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models1.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. :param vector_store_id: Identifier of the vector store. Required. @@ -4430,10 +4460,10 @@ def create( body: Union[JSON, IO[bytes]] = _Unset, *, file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_source: Optional[_models1.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models1.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFile: + ) -> _models1.VectorStoreFile: """Create a vector store file by attaching a file to a vector store. 
:param vector_store_id: Identifier of the vector store. Required. @@ -4463,7 +4493,7 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreFile] = kwargs.pop("cls", None) if body is _Unset: body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} @@ -4502,12 +4532,13 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) + deserialized = _deserialize(_models1.VectorStoreFile, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4515,7 +4546,7 @@ def create( return deserialized # type: ignore @distributed_trace - def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models1.VectorStoreFile: """Retrieves a vector store file. :param vector_store_id: Identifier of the vector store. Required. 
@@ -4537,7 +4568,7 @@ def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.Vect _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreFile] = kwargs.pop("cls", None) _request = build_vector_store_files_get_request( vector_store_id=vector_store_id, @@ -4565,12 +4596,13 @@ def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.Vect except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) + deserialized = _deserialize(_models1.VectorStoreFile, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4578,7 +4610,7 @@ def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.Vect return deserialized # type: ignore @distributed_trace - def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFileDeletionStatus: + def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models1.VectorStoreFileDeletionStatus: """Deletes a vector store file. This removes the file‐to‐store link (does not delete the file itself). 
@@ -4602,7 +4634,7 @@ def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.V _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) _request = build_vector_store_files_delete_request( vector_store_id=vector_store_id, @@ -4630,12 +4662,13 @@ def delete(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.V except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + deserialized = _deserialize(_models1.VectorStoreFileDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4653,7 +4686,7 @@ class VectorStoreFileBatchesOperations: :attr:`vector_store_file_batches` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AgentsClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -4667,10 +4700,10 @@ def create( *, content_type: str = "application/json", file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_sources: Optional[List[_models1.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models1.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models1.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. @@ -4693,7 +4726,7 @@ def create( @overload def create( self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models1.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. @@ -4711,7 +4744,7 @@ def create( @overload def create( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models1.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. 
@@ -4733,10 +4766,10 @@ def create( body: Union[JSON, IO[bytes]] = _Unset, *, file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + data_sources: Optional[List[_models1.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models1.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any - ) -> _models.VectorStoreFileBatch: + ) -> _models1.VectorStoreFileBatch: """Create a vector store file batch. :param vector_store_id: Identifier of the vector store. Required. @@ -4766,7 +4799,7 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreFileBatch] = kwargs.pop("cls", None) if body is _Unset: body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} @@ -4805,12 +4838,13 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + deserialized = _deserialize(_models1.VectorStoreFileBatch, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4818,7 +4852,7 @@ def create( return deserialized # type: ignore @distributed_trace - def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.VectorStoreFileBatch: + def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models1.VectorStoreFileBatch: """Retrieve a 
vector store file batch. :param vector_store_id: Identifier of the vector store. Required. @@ -4840,7 +4874,7 @@ def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.Vec _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreFileBatch] = kwargs.pop("cls", None) _request = build_vector_store_file_batches_get_request( vector_store_id=vector_store_id, @@ -4868,12 +4902,13 @@ def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.Vec except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + deserialized = _deserialize(_models1.VectorStoreFileBatch, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4881,7 +4916,7 @@ def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.Vec return deserialized # type: ignore @distributed_trace - def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.VectorStoreFileBatch: + def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models1.VectorStoreFileBatch: """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. @@ -4904,7 +4939,7 @@ def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models. 
_headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + cls: ClsType[_models1.VectorStoreFileBatch] = kwargs.pop("cls", None) _request = build_vector_store_file_batches_cancel_request( vector_store_id=vector_store_id, @@ -4932,12 +4967,13 @@ def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models. except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + deserialized = _deserialize(_models1.VectorStoreFileBatch, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4950,12 +4986,12 @@ def list_files( vector_store_id: str, batch_id: str, *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + filter: Optional[Union[str, _models1.VectorStoreFileStatusFilter]] = None, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.VectorStoreFile"]: + ) -> Iterable["_models1.VectorStoreFile"]: """Returns a list of vector store files in a batch. :param vector_store_id: Identifier of the vector store. Required. 
@@ -4983,7 +5019,7 @@ def list_files( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.VectorStoreFile]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.VectorStoreFile]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5003,6 +5039,7 @@ def prepare_request(_continuation_token=None): order=order, after=_continuation_token, before=before, + api_version=self._config.api_version, headers=_headers, params=_params, ) @@ -5014,7 +5051,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.VectorStoreFile], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.VectorStoreFile], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -5030,14 +5067,15 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return ItemPaged(get_next, extract_data) -class AgentsClientOperationsMixin(ClientMixinABC[PipelineClient, AgentsClientConfiguration]): +class AgentsClientOperationsMixin(ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], AgentsClientConfiguration]): @overload def create_agent( @@ -5048,14 +5086,14 @@ def create_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, + 
tool_resources: Optional[_models1.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models1.Agent: """Creates a new agent. :keyword model: The ID of the model to use. Required. @@ -5104,7 +5142,7 @@ def create_agent( """ @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models1.Agent: """Creates a new agent. :param body: Required. @@ -5118,7 +5156,7 @@ def create_agent(self, body: JSON, *, content_type: str = "application/json", ** """ @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models1.Agent: """Creates a new agent. :param body: Required. @@ -5140,14 +5178,14 @@ def create_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, + tool_resources: Optional[_models1.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models1.Agent: """Creates a new agent. :param body: Is either a JSON type or a IO[bytes] type. Required. 
@@ -5205,7 +5243,7 @@ def create_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models1.Agent] = kwargs.pop("cls", None) if body is _Unset: if model is _Unset: @@ -5256,12 +5294,13 @@ def create_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models1.Agent, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5273,10 +5312,10 @@ def list_agents( self, *, limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, + order: Optional[Union[str, _models1.ListSortOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> Iterable["_models.Agent"]: + ) -> Iterable["_models1.Agent"]: """Gets a list of agents that were previously created. :keyword limit: A limit on the number of objects to be returned. 
Limit can range between 1 and @@ -5297,7 +5336,7 @@ def list_agents( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Agent]] = kwargs.pop("cls", None) + cls: ClsType[List[_models1.Agent]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5326,7 +5365,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Agent], deserialized.get("data", [])) + list_of_elem = _deserialize(List[_models1.Agent], deserialized.get("data", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("last_id") or None, iter(list_of_elem) @@ -5342,14 +5381,15 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) return pipeline_response return ItemPaged(get_next, extract_data) @distributed_trace - def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: + def get_agent(self, agent_id: str, **kwargs: Any) -> _models1.Agent: """Retrieves an existing agent. :param agent_id: Identifier of the agent. Required. 
@@ -5369,7 +5409,7 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models1.Agent] = kwargs.pop("cls", None) _request = build_agents_get_agent_request( agent_id=agent_id, @@ -5396,12 +5436,13 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models1.Agent, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5418,14 +5459,14 @@ def update_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, + tool_resources: Optional[_models1.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models1.Agent: """Modifies an existing agent. :param agent_id: The ID of the agent to modify. Required. @@ -5480,7 +5521,7 @@ def update_agent( @overload def update_agent( self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: + ) -> _models1.Agent: """Modifies an existing agent. 
:param agent_id: The ID of the agent to modify. Required. @@ -5498,7 +5539,7 @@ def update_agent( @overload def update_agent( self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: + ) -> _models1.Agent: """Modifies an existing agent. :param agent_id: The ID of the agent to modify. Required. @@ -5523,14 +5564,14 @@ def update_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, + tool_resources: Optional[_models1.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: + ) -> _models1.Agent: """Modifies an existing agent. :param agent_id: The ID of the agent to modify. Required. 
@@ -5592,7 +5633,7 @@ def update_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models1.Agent] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -5642,12 +5683,13 @@ def update_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models1.Agent, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5655,7 +5697,7 @@ def update_agent( return deserialized # type: ignore @distributed_trace - def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + def delete_agent(self, agent_id: str, **kwargs: Any) -> _models1.AgentDeletionStatus: """Deletes an agent. :param agent_id: Identifier of the agent. Required. 
@@ -5675,7 +5717,7 @@ def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionSta _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models1.AgentDeletionStatus] = kwargs.pop("cls", None) _request = build_agents_delete_agent_request( agent_id=agent_id, @@ -5702,12 +5744,13 @@ def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionSta except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + deserialized = _deserialize(_models1.AgentDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5720,23 +5763,23 @@ def create_thread_and_run( *, agent_id: str, content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, + thread: Optional[_models1.AgentThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, + tool_resources: Optional[_models1.UpdateToolResourcesOptions] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models1.TruncationObject] = None, 
tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :keyword agent_id: The ID of the agent for which the thread should be created. Required. @@ -5820,7 +5863,7 @@ def create_thread_and_run( @overload def create_thread_and_run( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :param body: Required. @@ -5836,7 +5879,7 @@ def create_thread_and_run( @overload def create_thread_and_run( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :param body: Required. 
@@ -5855,23 +5898,23 @@ def create_thread_and_run( body: Union[JSON, IO[bytes]] = _Unset, *, agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, + thread: Optional[_models1.AgentThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + tools: Optional[List[_models1.ToolDefinition]] = None, + tool_resources: Optional[_models1.UpdateToolResourcesOptions] = None, stream_parameter: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, + truncation_strategy: Optional[_models1.TruncationObject] = None, tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, response_format: Optional["_types.AgentsResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.ThreadRun: + ) -> _models1.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. :param body: Is either a JSON type or a IO[bytes] type. Required. 
@@ -5962,7 +6005,7 @@ def create_thread_and_run( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + cls: ClsType[_models1.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: if agent_id is _Unset: @@ -6019,12 +6062,13 @@ def create_thread_and_run( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize(_servicepatterns_models4.AgentV1Error, response.json()) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ThreadRun, response.json()) + deserialized = _deserialize(_models1.ThreadRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_patch.py b/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_patch.py index 77d4bd0502ee..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_patch.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_patch.py @@ -1,2228 +1,15 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import ast -import io -import logging -import os -import sys -import time -import json -from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - Dict, - Iterator, - List, - Optional, - Union, - cast, - overload, -) +from typing import List -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from ..models._enums import FilePurpose, RunStatus -from ._operations import FilesOperations as FilesOperationsGenerated -from ._operations import RunsOperations as RunsOperationsGenerated -from ._operations import MessagesOperations as MessagesOperationsGenerated -from ._operations import VectorStoresOperations as VectorStoresOperationsGenerated -from ._operations import VectorStoreFilesOperations as VectorStoreFilesOperationsGenerated -from ._operations import VectorStoreFileBatchesOperations as VectorStoreFileBatchesOperationsGenerated -from .._utils.utils import FileType - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from .. import _types - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - -logger = logging.getLogger(__name__) - - -def _has_errors_in_toolcalls_output(tool_outputs: List[Dict]) -> bool: - """ - Check if any tool output contains an error. - - :param List[Dict] tool_outputs: A list of tool outputs to check. - :return: True if any output contains an error, False otherwise. 
- :rtype: bool - """ - for tool_output in tool_outputs: - output = tool_output.get("output") - if isinstance(output, str): - try: - output_json = json.loads(output) - if "error" in output_json: - return True - except json.JSONDecodeError: - continue - return False - - -class RunsOperations(RunsOperationsGenerated): - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - - # if the client didn't inject these for some reason, give safe defaults: - if not hasattr(self, "_function_tool"): - self._function_tool = _models.FunctionTool(set()) - if not hasattr(self, "_function_tool_max_retry"): - self._function_tool_max_retry = 0 - - # pylint: disable=arguments-differ - @overload - def create( # pylint: disable=arguments-differ - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. 
- Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. 
- Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - thread_id: str, - body: IO[bytes], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. 
- :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. 
- Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. 
- :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. 
- :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create( - thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return response - - @distributed_trace - def create_and_process( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. 
- :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessageOptions] - :keyword toolset: The Collection of tools and resources (alternative to `tools` and - `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.agents.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. 
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.agents.models.AgentsApiResponseFormatMode or - ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :keyword polling_interval: The time in seconds to wait between polling the service for run status. - Default value is 1. - :paramtype polling_interval: int - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - # Create and initiate the run with additional parameters - run = self.create( - thread_id=thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=toolset.definitions if toolset else None, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - # Monitor and process the run status - current_retry = 0 - while run.status in [ - RunStatus.QUEUED, - RunStatus.IN_PROGRESS, - RunStatus.REQUIRES_ACTION, - ]: - time.sleep(polling_interval) - run = self.get(thread_id=thread_id, run_id=run.id) - - if run.status == RunStatus.REQUIRES_ACTION and isinstance( - run.required_action, _models.SubmitToolOutputsAction - ): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.warning("No tool calls provided - cancelling run") - self.cancel(thread_id=thread_id, run_id=run.id) - break - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = _models.ToolSet() - toolset.add(self._function_tool) - tool_outputs = toolset.execute_tool_calls(tool_calls) - - if _has_errors_in_toolcalls_output(tool_outputs): - if current_retry >= self._function_tool_max_retry: # pylint:disable=no-else-return - logger.warning( - "Tool outputs contain errors - reaching max retry %s", self._function_tool_max_retry - ) - return self.cancel(thread_id=thread_id, run_id=run.id) - else: - logger.warning("Tool outputs contain errors - retrying") - current_retry += 1 - - logger.debug("Tool outputs: %s", tool_outputs) - if tool_outputs: - run2 = self.submit_tool_outputs(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) - logger.debug("Tool outputs submitted to run: %s", run2.id) - - logger.debug("Current run ID: %s with status: %s", run.id, run.status) - - return run - - @overload - def stream( - self, - thread_id: str, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: None = None, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.AgentEventHandler]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. 
- :type thread_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. 
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: None - :paramtype event_handler: None. _models.AgentEventHandler will be applied as default. - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def stream( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - 
event_handler: _models.BaseAgentEventHandlerT, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. 
- :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.agents.models.AgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: None = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AgentRunStream[_models.AgentEventHandler]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. 
- :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword event_handler: None - :paramtype event_handler: None. _models.AgentEventHandler will be applied as default. - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - event_handler: _models.BaseAgentEventHandlerT, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.agents.models.AgentEventHandler - :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsToolChoiceOption"] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.BaseAgentEventHandlerT] = None, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.agents.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.agents.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.agents.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. 
The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.agents.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.agents.models.AgentsToolChoiceOptionMode or - ~azure.ai.agents.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.agents.models.AgentsApiResponseFormatMode - or ~azure.ai.agents.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. 
- :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.agents.models.AgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.agents.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create( - thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - if not event_handler: - event_handler = cast(_models.BaseAgentEventHandlerT, _models.AgentEventHandler()) - if isinstance(event_handler, _models.AgentEventHandler): - event_handler.set_max_retry(self._function_tool_max_retry) - return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - # pylint: disable=arguments-differ - @overload - def submit_tool_outputs( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.agents.models.AgentEventHandler - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs( - thread_id, - run_id, - tool_outputs=tool_outputs, - stream_parameter=False, - stream=False, - **kwargs, - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return response - - @overload - def submit_tool_outputs_stream( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]], - *, - event_handler: _models.BaseAgentEventHandler, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: - 
"""Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.agents.models.BaseAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_stream( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: _models.BaseAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. 
- :paramtype event_handler: ~azure.ai.agents.models.BaseAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: _models.BaseAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.agents.models.ToolOutput] - :keyword event_handler: The event handler to use for processing events during the run. 
- :paramtype event_handler: ~azure.ai.agents.models.BaseAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs( - thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - # Cast the response to Iterator[bytes] for type correctness - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) - - def _handle_submit_tool_outputs( - self, run: _models.ThreadRun, event_handler: _models.BaseAgentEventHandler, submit_with_error: bool - ) -> Any: - tool_outputs: Any = [] - if isinstance(run.required_action, _models.SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.debug("No tool calls to execute.") - return tool_outputs - - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if ( - any(tool_call.type == "function" for tool_call in tool_calls) - and len(self._function_tool.definitions) > 0 - ): - - toolset = _models.ToolSet() - toolset.add(self._function_tool) - tool_outputs = toolset.execute_tool_calls(tool_calls) - - if _has_errors_in_toolcalls_output(tool_outputs): - if submit_with_error: - logger.warning("Tool outputs contain errors - retrying") - else: - logger.warning("Tool outputs contain errors - reaching max retry limit") - - response = self.cancel(thread_id=run.thread_id, run_id=run.id) - response_json = ast.literal_eval(str(response)) - response_json_str = json.dumps(response_json) - - event_data_str = f"event: thread.run.cancelled\ndata: {response_json_str}" - byte_string = event_data_str.encode("utf-8") - - event_handler.initialize(iter([byte_string]), self._handle_submit_tool_outputs) - - return tool_outputs - - logger.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - self.submit_tool_outputs_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - return tool_outputs - - -class FilesOperations(FilesOperationsGenerated): - - # pylint: disable=arguments-differ - @overload - def upload( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :return: FileInfo. 
The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def upload( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.agents._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload(self, body: JSON, **kwargs: Any) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. 
Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: _models.FileInfo - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - # If a JSON body is provided directly, pass it along - if body is not None: - return super()._upload_file(body=body, **kwargs) - - # Convert FilePurpose enum to string if necessary - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - # If file content is passed in directly - if file is not None and purpose is not None: - return super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) - - # If a file path is provided - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # If no explicit filename is provided, use the base name - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}") from e - - raise ValueError("Invalid parameters for upload_file. 
Please provide the necessary arguments.") - - @overload - def upload_and_poll( - self, body: JSON, *, polling_interval: float = 1, timeout: Optional[float] = None, **kwargs: Any - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def upload_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.agents._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. 
- """ - - @overload - def upload_and_poll( - self, - *, - file_path: str, - purpose: Union[str, _models.FilePurpose], - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.agents.models.FilePurpose - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.FileInfo - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @distributed_trace - def upload_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.FileInfo: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. 
- :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :keyword polling_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the uploaded file. - :paramtype timeout: float - :return: FileInfo. The FileInfo is compatible with MutableMapping - :rtype: _models.FileInfo - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - :raises TimeoutError: If the operation times out while polling for status. - """ - - curr_time = time.monotonic() - if body is not None: - uploaded_file = self.upload(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = self.upload(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = self.upload(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." - ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. 
Stopping polling.") - - time.sleep(polling_interval) - uploaded_file = self.get(uploaded_file.id) - - return uploaded_file - - @distributed_trace - def get_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: - """ - Returns file content as byte stream for given file_id. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: An iterator that yields bytes from the file content. - :rtype: Iterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. - """ - kwargs["stream"] = True - response = super()._get_file_content(file_id, **kwargs) - return cast(Iterator[bytes], response) - - @distributed_trace - def save(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: - """ - Synchronously saves file content retrieved using a file identifier to the specified local directory. - - :param file_id: The unique identifier for the file to retrieve. - :type file_id: str - :param file_name: The name of the file to be saved. - :type file_name: str - :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: Optional[Union[str, Path]] - :raises ValueError: If the target path is not a directory or the file name is invalid. - :raises RuntimeError: If file content retrieval fails or no content is found. - :raises TypeError: If retrieved chunks are not bytes-like objects. - :raises IOError: If writing to the file fails. 
- """ - try: - # Determine and validate the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - if not path.is_dir(): - raise ValueError(f"The target path '{path}' is not a directory.") - - # Sanitize and validate the file name - sanitized_file_name = Path(file_name).name - if not sanitized_file_name: - raise ValueError("The provided file name is invalid.") - - # Retrieve the file content - file_content_stream = self.get_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - target_file_path = path / sanitized_file_name - - # Write the file content to disk - with target_file_path.open("wb") as file: - for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - file.write(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) - - except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error("An error occurred in save_file: %s", e) - raise - - -class VectorStoresOperations(VectorStoresOperationsGenerated): - - @overload - def create_and_poll( - self, - body: JSON, - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def create_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.agents.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.agents.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def create_and_poll( - self, - body: IO[bytes], - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. 
- """ - - @distributed_trace - def create_and_poll( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.agents.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.agents.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. 
Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - curr_time = time.monotonic() - - if body is not _Unset: - if isinstance(body, dict): - vector_store = super().create(body=body, content_type=content_type or "application/json", **kwargs) - elif isinstance(body, io.IOBase): - vector_store = super().create(body=body, content_type=content_type, **kwargs) - else: - raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") - else: - store_configuration = None - if data_sources: - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) - - vector_store = super().create( - file_ids=file_ids, - store_configuration=store_configuration, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - - while vector_store.status == "in_progress": - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. 
Stopping polling.") - - time.sleep(polling_interval) - vector_store = super().get(vector_store.id) - - return vector_store - - -class VectorStoreFileBatchesOperations(VectorStoreFileBatchesOperationsGenerated): - - @overload - def create_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def create_and_poll( - self, - vector_store_id: str, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. 
- :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.agents.models.VectorStoreDataSource] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def create_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. 
- :paramtype timeout: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @distributed_trace - def create_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword content_type: Body parameter content-type. Defaults to "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - curr_time = time.monotonic() - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file_batch = super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file_batch = super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like (IO[bytes]).") - else: - vector_store_file_batch = super().create( - vector_store_id=vector_store_id, - file_ids=file_ids, - data_sources=data_sources, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file_batch.status == "in_progress": - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. Stopping polling.") - - time.sleep(polling_interval) - vector_store_file_batch = super().get(vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id) - - return vector_store_file_batch - - -class VectorStoreFilesOperations(VectorStoreFilesOperationsGenerated): - - @overload - def create_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
- Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def create_and_poll( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.agents.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. 
- :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. - """ - - @overload - def create_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raises TimeoutError: If the operation times out while polling for status. 
- """ - - @distributed_trace - def create_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - polling_interval: float = 1, - timeout: Optional[float] = None, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.agents.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.agents.models.VectorStoreChunkingStrategyRequest - :keyword polling_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype polling_interval: float - :keyword timeout: Time to wait before polling for the status of the vector store. - :paramtype timeout: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.agents.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - :raise TimeoutError: If the operation times out while polling for status. 
- """ - - curr_time = time.monotonic() - if body is not _Unset: - if isinstance(body, dict): - vector_store_file = super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file = super().create( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") - else: - vector_store_file = super().create( - vector_store_id=vector_store_id, - file_id=file_id, - data_source=data_source, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file.status == "in_progress": - - if timeout is not None and (time.monotonic() - curr_time - polling_interval) >= timeout: - raise TimeoutError("Timeout reached. Stopping polling.") - - time.sleep(polling_interval) - vector_store_file = super().get(vector_store_id=vector_store_id, file_id=vector_store_file.id) - - return vector_store_file - - -class MessagesOperations(MessagesOperationsGenerated): - - def get_last_message_by_role( - self, - thread_id: str, - role: _models.MessageRole, - **kwargs, - ) -> Optional[_models.ThreadMessage]: - """ - Return the most-recent message in *thread_id* authored by *role*. - - The implementation streams messages (newest first, where the - service/SDK supports that) and stops at the first match. - - :param thread_id: The ID of the thread to search. - :type thread_id: str - :param role: The role of the message author. - :type role: ~azure.ai.agents.models.MessageRole - - :return: The most recent message authored by *role* in the thread, or None if no such message exists. 
- :rtype: Optional[~azure.ai.agents.models.ThreadMessage] - """ - pageable = self.list(thread_id, **kwargs) # type: ignore[arg-type] - - for message in pageable: - if message.role == role: - return message - return None - - def get_last_text_message_by_role( - self, - thread_id: str, - role: _models.MessageRole, - **kwargs, - ) -> Optional[_models.MessageTextContent]: - """ - Return the most-recent *text* message in *thread_id* authored by *role*. - - :param thread_id: The ID of the thread to search. - :type thread_id: str - :param role: The role of the message author. - :type role: ~azure.ai.agents.models.MessageRole - - :return: The most recent text message authored by *role* in the thread, or None if no such message exists. - :rtype: Optional[~azure.ai.agents.models.MessageTextContent] - """ - msg = self.get_last_message_by_role(thread_id, role, **kwargs) - if msg: - text_contents = msg.text_messages - if text_contents: - return text_contents[-1] - return None - - -__all__: List[str] = [ - "MessagesOperations", - "RunsOperations", - "FilesOperations", - "VectorStoresOperations", - "VectorStoreFilesOperations", - "VectorStoreFileBatchesOperations", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/__init__.py b/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/__init__.py deleted file mode 100644 index 9b00ace43af8..000000000000 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._ai_agents_instrumentor import AIAgentsInstrumentor -from ._trace_function import trace_function - - -__all__ = ["AIAgentsInstrumentor", "trace_function"] diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_ai_agents_instrumentor.py deleted file mode 100644 index f1cdbd39d98a..000000000000 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_ai_agents_instrumentor.py +++ /dev/null @@ -1,2168 +0,0 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import copy -import functools -import importlib -import json -import logging -import os -from datetime import datetime -from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast, TYPE_CHECKING -from urllib.parse import urlparse - -from azure.ai.agents.models import AgentRunStream, AsyncAgentRunStream, RunStepMessageCreationDetails, _models -from azure.ai.agents.models._enums import ( - AgentsResponseFormatMode, - MessageRole, - RunStepStatus, -) -from azure.ai.agents.models import ( - MessageAttachment, - MessageDeltaChunk, - MessageIncompleteDetails, - RunStep, - RunStepDeltaChunk, - RunStepError, - RunStepFunctionToolCall, - RunStepToolCallDetails, - RunStepCodeInterpreterToolCall, - RunStepBingGroundingToolCall, - ThreadMessage, - ThreadRun, - ToolDefinition, - ToolOutput, - ToolResources, -) -from azure.ai.agents.models._patch import ( - AgentEventHandler, - AsyncAgentEventHandler, - ToolSet, -) -from azure.core import CaseInsensitiveEnumMeta # type: ignore -from azure.core.settings import settings -from azure.core.tracing import AbstractSpan -from 
._utils import ( - AZ_AI_AGENT_SYSTEM, - ERROR_TYPE, - GEN_AI_AGENT_DESCRIPTION, - GEN_AI_AGENT_ID, - GEN_AI_AGENT_NAME, - GEN_AI_EVENT_CONTENT, - GEN_AI_MESSAGE_ID, - GEN_AI_MESSAGE_STATUS, - GEN_AI_RESPONSE_MODEL, - GEN_AI_SYSTEM, - GEN_AI_SYSTEM_MESSAGE, - GEN_AI_THREAD_ID, - GEN_AI_THREAD_RUN_ID, - GEN_AI_THREAD_RUN_STATUS, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, - GEN_AI_RUN_STEP_START_TIMESTAMP, - GEN_AI_RUN_STEP_END_TIMESTAMP, - GEN_AI_RUN_STEP_STATUS, - ERROR_MESSAGE, - OperationName, - start_span, -) -from ._instrument_paged_wrappers import _AsyncInstrumentedItemPaged, _InstrumentedItemPaged - - -_Unset: Any = object() - -logger = logging.getLogger(__name__) - -try: - # pylint: disable = no-name-in-module - from opentelemetry.trace import Span, StatusCode - - _tracing_library_available = True -except ModuleNotFoundError: - _tracing_library_available = False - -if TYPE_CHECKING: - from .. import _types - -__all__ = [ - "AIAgentsInstrumentor", -] - -_agents_traces_enabled: bool = False -_trace_agents_content: bool = False - - -class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 - """An enumeration class to represent different types of traces.""" - - AGENTS = "Agents" - - -class AIAgentsInstrumentor: - """ - A class for managing the trace instrumentation of AI Agents. - - This class allows enabling or disabling tracing for AI Agents. - and provides functionality to check whether instrumentation is active. - - """ - - def __init__(self): - if not _tracing_library_available: - raise ModuleNotFoundError( - "Azure Core Tracing Opentelemetry is not installed. " - "Please install it using 'pip install azure-core-tracing-opentelemetry'" - ) - # In the future we could support different versions from the same library - # and have a parameter that specifies the version to use. 
- self._impl = _AIAgentsInstrumentorPreview() - - def instrument(self, enable_content_recording: Optional[bool] = None) -> None: - """ - Enable trace instrumentation for AI Agents. - - :param enable_content_recording: Whether content recording is enabled as part - of the traces or not. Content in this context refers to chat message content - and function call tool related function names, function parameter names and - values. True will enable content recording, False will disable it. If no value - is provided, then the value read from environment variable - AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable - is not found, then the value will default to False. Please note that successive calls - to instrument will always apply the content recording value provided with the most - recent call to instrument (including applying the environment variable if no value is - provided and defaulting to false if the environment variable is not found), even if - instrument was already previously called without uninstrument being called in between - the instrument calls. - :type enable_content_recording: bool, optional - - """ - self._impl.instrument(enable_content_recording) - - def uninstrument(self) -> None: - """ - Remove trace instrumentation for AI Agents. - - This method removes any active instrumentation, stopping the tracing - of AI Agents. - """ - self._impl.uninstrument() - - def is_instrumented(self) -> bool: - """ - Check if trace instrumentation for AI Agents is currently enabled. - - :return: True if instrumentation is active, False otherwise. - :rtype: bool - """ - return self._impl.is_instrumented() - - def is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content recording is enabled. 
- :rtype: bool - """ - return self._impl.is_content_recording_enabled() - - -class _AIAgentsInstrumentorPreview: - # pylint: disable=R0904 - """ - A class for managing the trace instrumentation of AI Agents. - - This class allows enabling or disabling tracing for AI Agents. - and provides functionality to check whether instrumentation is active. - """ - - def _str_to_bool(self, s): - if s is None: - return False - return str(s).lower() == "true" - - def instrument(self, enable_content_recording: Optional[bool] = None): - """ - Enable trace instrumentation for AI Agents. - - :param enable_content_recording: Whether content recording is enabled as part - of the traces or not. Content in this context refers to chat message content - and function call tool related function names, function parameter names and - values. True will enable content recording, False will disable it. If no value - is provided, then the value read from environment variable - AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable - is not found, then the value will default to False. - - :type enable_content_recording: bool, optional - """ - if enable_content_recording is None: - var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED") - enable_content_recording = self._str_to_bool(var_value) - if not self.is_instrumented(): - self._instrument_agents(enable_content_recording) - else: - self._set_enable_content_recording(enable_content_recording=enable_content_recording) - - def uninstrument(self): - """ - Disable trace instrumentation for AI Agents. - - This method removes any active instrumentation, stopping the tracing - of AI Agents. - """ - if self.is_instrumented(): - self._uninstrument_agents() - - def is_instrumented(self): - """ - Check if trace instrumentation for AI Agents is currently enabled. - - :return: True if instrumentation is active, False otherwise. 
- :rtype: bool - """ - return self._is_instrumented() - - def set_enable_content_recording(self, enable_content_recording: bool = False) -> None: - """This function sets the content recording value. - - :param enable_content_recording: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. - :type enable_content_recording: bool - """ - self._set_enable_content_recording(enable_content_recording=enable_content_recording) - - def is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content tracing is enabled. - :rtype bool - """ - return self._is_content_recording_enabled() - - def _set_attributes(self, span: "AbstractSpan", *attrs: Tuple[str, Any]) -> None: - for attr in attrs: - key, value = attr - if value is not None: - span.add_attribute(key, value) - - def _parse_url(self, url): - parsed = urlparse(url) - server_address = parsed.hostname - port = parsed.port - return server_address, port - - def _remove_function_call_names_and_arguments(self, tool_calls: list) -> list: - tool_calls_copy = copy.deepcopy(tool_calls) - for tool_call in tool_calls_copy: - if "function" in tool_call: - if "name" in tool_call["function"]: - del tool_call["function"]["name"] - if "arguments" in tool_call["function"]: - del tool_call["function"]["arguments"] - if not tool_call["function"]: - del tool_call["function"] - return tool_calls_copy - - def _create_event_attributes( - self, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - thread_run_id: Optional[str] = None, - message_id: Optional[str] = None, - message_status: Optional[str] = None, - run_step_status: Optional[str] = None, - created_at: Optional[datetime] = None, - completed_at: Optional[datetime] = None, - cancelled_at: Optional[datetime] = None, - failed_at: Optional[datetime] = None, - 
run_step_last_error: Optional[RunStepError] = None, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> Dict[str, Any]: - attrs: Dict[str, Any] = {GEN_AI_SYSTEM: AZ_AI_AGENT_SYSTEM} - if thread_id: - attrs[GEN_AI_THREAD_ID] = thread_id - - if agent_id: - attrs[GEN_AI_AGENT_ID] = agent_id - - if thread_run_id: - attrs[GEN_AI_THREAD_RUN_ID] = thread_run_id - - if message_id: - attrs[GEN_AI_MESSAGE_ID] = message_id - - if message_status: - attrs[GEN_AI_MESSAGE_STATUS] = self._status_to_string(message_status) - - if run_step_status: - attrs[GEN_AI_RUN_STEP_STATUS] = self._status_to_string(run_step_status) - - if created_at: - if isinstance(created_at, datetime): - attrs[GEN_AI_RUN_STEP_START_TIMESTAMP] = created_at.isoformat() - else: - # fallback in case integer or string gets passed - attrs[GEN_AI_RUN_STEP_START_TIMESTAMP] = str(created_at) - - end_timestamp = None - if completed_at: - end_timestamp = completed_at - elif cancelled_at: - end_timestamp = cancelled_at - elif failed_at: - end_timestamp = failed_at - - if isinstance(end_timestamp, datetime): - attrs[GEN_AI_RUN_STEP_END_TIMESTAMP] = end_timestamp.isoformat() - elif end_timestamp: - # fallback in case int or string gets passed - attrs[GEN_AI_RUN_STEP_END_TIMESTAMP] = str(end_timestamp) - - if run_step_last_error: - attrs[ERROR_MESSAGE] = run_step_last_error.message - attrs[ERROR_TYPE] = run_step_last_error.code - - if usage: - attrs[GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens - attrs[GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens - - return attrs - - def add_thread_message_event( - self, - span, - message: ThreadMessage, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> None: - content_body = {} - if _trace_agents_content: - for content in message.content: - typed_content = content.get(content.type, None) - if typed_content: - content_details = {"value": self._get_field(typed_content, "value")} - annotations = self._get_field(typed_content, "annotations") - if 
annotations: - content_details["annotations"] = [a.as_dict() for a in annotations] - content_body[content.type] = content_details - - self._add_message_event( - span, - self._get_role(message.role), - content_body, - attachments=message.attachments, - thread_id=message.thread_id, - agent_id=message.agent_id, - message_id=message.id, - thread_run_id=message.run_id, - message_status=message.status, - incomplete_details=message.incomplete_details, - usage=usage, - ) - - def _process_tool_calls(self, step: RunStep) -> List[Dict[str, Any]]: - """ - Helper method to process tool calls and return a list of tool call dictionaries. - - :param step: The run step containing tool call details to be processed. - :type step: RunStep - :return: A list of dictionaries, each representing a processed tool call. - :rtype: List[Dict[str, Any]] - """ - tool_calls = [] - tool_call: Dict[str, Any] = {} - for t in cast(RunStepToolCallDetails, step.step_details).tool_calls: - if not _trace_agents_content: - tool_call = { - "id": t.id, - "type": t.type, - } - elif isinstance(t, RunStepFunctionToolCall): - try: - parsed_arguments = json.loads(t.function.arguments) - except json.JSONDecodeError: - parsed_arguments = {} - - tool_call = { - "id": t.id, - "type": t.type, - "function": { - "name": t.function.name, - "arguments": parsed_arguments, - }, - } - elif isinstance(t, RunStepCodeInterpreterToolCall): - tool_call = { - "id": t.id, - "type": t.type, - "code_interpreter": { - "input": t.code_interpreter.input, - "outputs": [output.as_dict() for output in t.code_interpreter.outputs], - }, - } - elif isinstance(t, RunStepBingGroundingToolCall): - tool_call = { - "id": t.id, - "type": t.type, - t.type: t.bing_grounding, - } - else: - tool_details = t.as_dict()[t.type] - - tool_call = { - "id": t.id, - "type": t.type, - t.type: tool_details, - } - tool_calls.append(tool_call) - return tool_calls - - def _add_tool_call_event( - self, - span, - step: RunStep, - event_name: str, - 
is_run_step_listing: bool = False, - ) -> None: - """ - Adds a tool call event to a span. - - This method processes tool calls from a given run step and adds them as an event - to the provided span. It includes relevant attributes such as the run step status, - timestamps, tool call details, and optionally the message status. - - :param span: The span instance where the tool call event will be recorded. - :type span: AbstractSpan - :param step: The run step containing details about the tool calls to be processed. - :type step: RunStep - :param event_name: The name of the event to be added to the span (e.g., "gen_ai.run_step.tool_calls"). - :type event_name: str - :param is_run_step_listing: A flag indicating whether the event is part of a run step listing. - If True, the run step status is included in the attributes; otherwise, the message status is included. - :type is_run_step_listing: bool - :return: None - """ - tool_calls = self._process_tool_calls(step) - - run_step_status = None - message_status = None - if is_run_step_listing: - run_step_status = step.status - else: - message_status = step.status - - attributes = self._create_event_attributes( - thread_id=step.thread_id, - agent_id=step.agent_id, - thread_run_id=step.run_id, - message_status=message_status, - run_step_status=run_step_status, - created_at=step.created_at, - completed_at=step.completed_at, - cancelled_at=step.cancelled_at, - failed_at=step.failed_at, - run_step_last_error=step.last_error, - usage=step.usage, - ) - - if tool_calls: - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}, ensure_ascii=False) - - span.span_instance.add_event(name=event_name, attributes=attributes) - - def add_run_step_event(self, span, step: RunStep) -> None: - """ - Adds a run step event to the span. - - This method determines the type of the run step and adds the appropriate event - to the provided span. 
It processes either a "message_creation" or "tool_calls" - run step and includes relevant attributes such as the run step status, timestamps, - and tool call or message details. - - :param span: The span instance where the run step event will be recorded. - :type span: AbstractSpan - :param step: The run step containing details about the event to be added. - :type step: RunStep - :return: None - """ - if step["type"] == "message_creation": - self._add_message_creation_run_step_event(span, step) - elif step["type"] == "tool_calls": - self._add_tool_call_event(span, step, "gen_ai.run_step.tool_calls", is_run_step_listing=True) - - def _add_message_creation_run_step_event(self, span, step: RunStep) -> None: - attributes = self._create_event_attributes( - thread_id=step.thread_id, - agent_id=step.agent_id, - thread_run_id=step.run_id, - message_id=step["step_details"]["message_creation"]["message_id"], - run_step_status=step.status, - created_at=step.created_at, - completed_at=step.completed_at, - cancelled_at=step.cancelled_at, - failed_at=step.failed_at, - run_step_last_error=step.last_error, - usage=step.usage, - ) - span.span_instance.add_event(name="gen_ai.run_step.message_creation", attributes=attributes) - - def _add_message_event( - self, - span, - role: str, - content: Any, - attachments: Any = None, # Optional[List[MessageAttachment]] or dict - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - message_id: Optional[str] = None, - thread_run_id: Optional[str] = None, - message_status: Optional[str] = None, - incomplete_details: Optional[MessageIncompleteDetails] = None, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> None: - # TODO document new fields - - event_body = {} - if _trace_agents_content: - event_body["content"] = content - if attachments: - event_body["attachments"] = [] - for attachment in attachments: - attachment_body = {"id": attachment.file_id} - if attachment.tools: - attachment_body["tools"] = 
[self._get_field(tool, "type") for tool in attachment.tools] - event_body["attachments"].append(attachment_body) - - if incomplete_details: - event_body["incomplete_details"] = incomplete_details - event_body["role"] = role - - attributes = self._create_event_attributes( - thread_id=thread_id, - agent_id=agent_id, - thread_run_id=thread_run_id, - message_id=message_id, - message_status=message_status, - usage=usage, - ) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) - span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) - - def _get_field(self, obj: Any, field: str) -> Any: - if not obj: - return None - - if isinstance(obj, dict): - return obj.get(field, None) - - return getattr(obj, field, None) - - def _add_instructions_event( - self, - span: "AbstractSpan", - instructions: Optional[str], - additional_instructions: Optional[str], - agent_id: Optional[str] = None, - thread_id: Optional[str] = None, - ) -> None: - if not instructions: - return - - event_body: Dict[str, Any] = {} - if _trace_agents_content and (instructions or additional_instructions): - if instructions and additional_instructions: - event_body["content"] = f"{instructions} {additional_instructions}" - else: - event_body["content"] = instructions or additional_instructions - - attributes = self._create_event_attributes(agent_id=agent_id, thread_id=thread_id) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) - span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) - - def _get_role(self, role: Optional[Union[str, MessageRole]]) -> str: - if role is None or role is _Unset: - return "user" - - if isinstance(role, MessageRole): - return role.value - - return role - - def _status_to_string(self, status: Any) -> str: - return status.value if hasattr(status, "value") else status - - def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: - self._add_tool_call_event(span, 
step, "gen_ai.assistant.message", is_run_step_listing=False) - - def set_end_run(self, span: "AbstractSpan", run: Optional[ThreadRun]) -> None: - if run and span and span.span_instance.is_recording: - span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(run.status)) - span.add_attribute(GEN_AI_THREAD_RUN_ID, self._status_to_string(run.id)) - span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) - if run.usage: - span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) - span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) - - if run.last_error: - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - run.last_error.message, - ) - span.add_attribute(ERROR_TYPE, run.last_error.code) - - @staticmethod - def agent_api_response_to_str(response_format: Any) -> Optional[str]: - """ - Convert response_format to string. - - :param response_format: The response format. - :type response_format: ~azure.ai.agents._types.AgentsResponseFormatOption - :returns: string for the response_format. - :rtype: Optional[str] - :raises: Value error if response_format is not of type AgentsResponseFormatOption. 
- """ - if isinstance(response_format, str) or response_format is None: - return response_format - if isinstance(response_format, AgentsResponseFormatMode): - return response_format.value - if isinstance(response_format, _models.AgentsResponseFormat): - return response_format.type - if isinstance(response_format, _models.ResponseFormatJsonSchemaType): - return response_format.type - raise ValueError(f"Unknown response format {type(response_format)}") - - def start_thread_run_span( - self, - operation_name: OperationName, - server_address: Optional[str] = None, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[ThreadMessage]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - _tools: Optional[List[ToolDefinition]] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - ) -> "Optional[AbstractSpan]": - span = start_span( - operation_name, - server_address, - thread_id=thread_id, - agent_id=agent_id, - model=model, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), - ) - if span and span.span_instance.is_recording and instructions and additional_instructions: - self._add_instructions_event( - span, - instructions, - additional_instructions, - thread_id=thread_id, - agent_id=agent_id, - ) - - if additional_messages: - for message in additional_messages: - self.add_thread_message_event(span, message) - return span - - def start_submit_tool_outputs_span( - self, - server_address: Optional[str] = None, - thread_id: Optional[str] = None, - run_id: Optional[str] = None, - tool_outputs: 
Optional[List[ToolOutput]] = None, - event_handler: Optional[Union[AgentEventHandler, AsyncAgentEventHandler]] = None, - ) -> "Optional[AbstractSpan]": - run_span = event_handler.span if isinstance(event_handler, _AgentEventHandlerTraceWrapper) else None - if run_span is None: - run_span = event_handler.span if isinstance(event_handler, _AsyncAgentEventHandlerTraceWrapper) else None - - if run_span: - recorded = self._add_tool_message_events(run_span, tool_outputs) - else: - recorded = False - - span = start_span( - OperationName.SUBMIT_TOOL_OUTPUTS, - server_address, - thread_id=thread_id, - run_id=run_id, - ) - if not recorded: - self._add_tool_message_events(span, tool_outputs) - return span - - def _add_tool_message_events( - self, span: "Optional[AbstractSpan]", tool_outputs: Optional[List[ToolOutput]] - ) -> bool: - if span and span.span_instance.is_recording and tool_outputs: - for tool_output in tool_outputs: - if _trace_agents_content: - body = { - "content": tool_output["output"], - "id": tool_output["tool_call_id"], - } - else: - body = {"content": "", "id": tool_output["tool_call_id"]} - span.span_instance.add_event( - "gen_ai.tool.message", - {"gen_ai.event.content": json.dumps(body, ensure_ascii=False)}, - ) - return True - - return False - - def start_create_agent_span( - self, - server_address: Optional[str] = None, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - _tools: Optional[List[ToolDefinition]] = None, - _tool_resources: Optional[ToolResources] = None, - _toolset: Optional[ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsResponseFormatOption"] = None, - ) -> "Optional[AbstractSpan]": - span = start_span( - OperationName.CREATE_AGENT, - server_address=server_address, - span_name=f"{OperationName.CREATE_AGENT.value} {name}", - model=model, - temperature=temperature, - 
top_p=top_p, - response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), - ) - if span and span.span_instance.is_recording: - if name: - span.add_attribute(GEN_AI_AGENT_NAME, name) - if description: - span.add_attribute(GEN_AI_AGENT_DESCRIPTION, description) - self._add_instructions_event(span, instructions, None) - - return span - - def start_create_thread_span( - self, - server_address: Optional[str] = None, - messages: Optional[List[ThreadMessage]] = None, - _tool_resources: Optional[ToolResources] = None, - ) -> "Optional[AbstractSpan]": - span = start_span(OperationName.CREATE_THREAD, server_address=server_address) - if span and span.span_instance.is_recording: - for message in messages or []: - self.add_thread_message_event(span, message) - - return span - - def start_list_messages_span( - self, server_address: Optional[str] = None, thread_id: Optional[str] = None - ) -> "Optional[AbstractSpan]": - return start_span( - OperationName.LIST_MESSAGES, - server_address=server_address, - thread_id=thread_id, - ) - - def start_list_run_steps_span( - self, - server_address: Optional[str] = None, - run_id: Optional[str] = None, - thread_id: Optional[str] = None, - ) -> "Optional[AbstractSpan]": - return start_span( - OperationName.LIST_RUN_STEPS, - server_address=server_address, - run_id=run_id, - thread_id=thread_id, - ) - - def get_server_address_from_arg(self, arg: Any) -> Optional[str]: - """ - Extracts the base endpoint from the provided arguments _config.endpoint attribute, if that exists. - - :param arg: The argument from which the server address is to be extracted. - :type arg: Any - :return: The base endpoint. None if endpoint is not found. 
- :rtype: str - """ - if hasattr(arg, "_config") and hasattr( - arg._config, # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - "endpoint", - ): - endpoint = ( - arg._config.endpoint # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - ) - parsed_url = urlparse(endpoint) - return f"{parsed_url.scheme}://{parsed_url.netloc}" - return None - - def trace_create_agent(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - name = kwargs.get("name") - model = kwargs.get("model") - description = kwargs.get("description") - instructions = kwargs.get("instructions") - tools = kwargs.get("tools") - tool_resources = kwargs.get("tool_resources") - toolset = kwargs.get("toolset") - temperature = kwargs.get("temperature") - top_p = kwargs.get("top_p") - response_format = kwargs.get("response_format") - - span = self.start_create_agent_span( - server_address=server_address, - name=name, - model=model, - description=description, - instructions=instructions, - _tools=tools, - _tool_resources=tool_resources, - _toolset=toolset, - temperature=temperature, - top_p=top_p, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_AGENT_ID, result.id) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_create_agent_async(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - name = kwargs.get("name") - model = kwargs.get("model") - description = kwargs.get("description") - instructions = kwargs.get("instructions") - tools = kwargs.get("tools") - tool_resources = kwargs.get("tool_resources") - toolset = kwargs.get("toolset") - temperature = kwargs.get("temperature") - top_p = kwargs.get("top_p") - response_format = kwargs.get("response_format") - - span = 
self.start_create_agent_span( - server_address=server_address, - name=name, - model=model, - description=description, - instructions=instructions, - _tools=tools, - _tool_resources=tool_resources, - _toolset=toolset, - temperature=temperature, - top_p=top_p, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_AGENT_ID, result.id) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_create_thread(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - messages = kwargs.get("messages") - - span = self.start_create_thread_span(server_address=server_address, messages=messages) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_create_thread_async(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - messages = kwargs.get("messages") - - span = self.start_create_thread_span(server_address=server_address, messages=messages) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_create_message(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - role = kwargs.get("role") - content = kwargs.get("content") - attachments = kwargs.get("attachments") - - span = self.start_create_message_span( - server_address=server_address, - thread_id=thread_id, - content=content, - role=role, - 
attachments=attachments, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_create_message_async(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - role = kwargs.get("role") - content = kwargs.get("content") - attachments = kwargs.get("attachments") - - span = self.start_create_message_span( - server_address=server_address, - thread_id=thread_id, - content=content, - role=role, - attachments=attachments, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_create_run(self, operation_name, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - - span = self.start_thread_run_span( - operation_name, - server_address, - thread_id, - agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - 
max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - if result: - span.add_attribute(GEN_AI_THREAD_RUN_ID, result.get("id")) - - self.set_end_run(span, result) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_create_run_async(self, operation_name, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - - span = self.start_thread_run_span( - operation_name, - server_address, - thread_id, - agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - if result: - span.add_attribute(GEN_AI_THREAD_RUN_ID, result.get("id")) - - self.set_end_run(span, result) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_get_run(self, operation_name, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - 
thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - - span = start_span( - operation_name, - server_address=server_address, - thread_id=thread_id, - run_id=run_id - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - if result: - span.add_attribute(GEN_AI_AGENT_ID, result.agent_id) - - self.set_end_run(span, result) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_get_run_async(self, operation_name, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - - span = start_span( - operation_name, - server_address=server_address, - thread_id=thread_id, - run_id=run_id - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - if result: - span.add_attribute(GEN_AI_AGENT_ID, result.agent_id) - self.set_end_run(span, result) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - tool_outputs = kwargs.get("tool_outputs") - event_handler = kwargs.get("event_handler") - - span = self.start_submit_tool_outputs_span( - server_address=server_address, - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - if stream and event_handler: - kwargs["event_handler"] = self.wrap_handler(event_handler, span) - - result = function(*args, **kwargs) - if not isinstance(result, AgentRunStream): - self.set_end_run(span, result) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async 
def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - tool_outputs = kwargs.get("tool_outputs") - event_handler = kwargs.get("event_handler") - - span = self.start_submit_tool_outputs_span( - server_address=server_address, - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - if stream: - kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) - - result = await function(*args, **kwargs) - if not isinstance(result, AsyncAgentRunStream): - self.set_end_run(span, result) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_handle_submit_tool_outputs(self, function, *args, **kwargs): - event_handler = kwargs.get("event_handler") - if event_handler is None: - event_handler = args[2] - span = getattr(event_handler, "span", None) - - if span is None: - return function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - result = function(*args, **kwargs) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs): - event_handler = kwargs.get("event_handler") - if event_handler is None: - event_handler = args[2] - span = getattr(event_handler, "span", None) - - if span is None: - return await function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - result = await function(*args, **kwargs) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_create_stream(self, function, *args, **kwargs): - operation_name = OperationName.PROCESS_THREAD_RUN - server_address = self.get_server_address_from_arg(args[0]) - thread_id = 
kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - event_handler = kwargs.get("event_handler") - - span = self.start_thread_run_span( - operation_name, - server_address=server_address, - thread_id=thread_id, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - kwargs["event_handler"] = self.wrap_handler(event_handler, span) - result = function(*args, **kwargs) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - async def trace_create_stream_async(self, function, *args, **kwargs): - operation_name = OperationName.PROCESS_THREAD_RUN - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = 
kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - event_handler = kwargs.get("event_handler") - - span = self.start_thread_run_span( - operation_name, - server_address=server_address, - thread_id=thread_id, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - # TODO: how to keep span active in the current context without existing? - # TODO: dummy span for none - with span.change_context(span.span_instance): - try: - kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) - result = await function(*args, **kwargs) - except Exception as exc: - self.record_error(span, exc) - raise - - return result - - def trace_list_messages(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - - span = self.start_list_messages_span(server_address=server_address, thread_id=thread_id) - - return _InstrumentedItemPaged(function(*args, **kwargs), self.add_thread_message_event, span) - - def trace_list_messages_async(self, function, *args, **kwargs): - # Note that this method is not async, but it operates on AsyncIterable. 
- server_address = self.get_server_address_from_arg(args[0]) - thread_id = kwargs.get("thread_id") - - span = self.start_list_messages_span(server_address=server_address, thread_id=thread_id) - - return _AsyncInstrumentedItemPaged(function(*args, **kwargs), self.add_thread_message_event, span) - - def trace_list_run_steps(self, function, *args, **kwargs): - server_address = self.get_server_address_from_arg(args[0]) - run_id = kwargs.get("run_id") - thread_id = kwargs.get("thread_id") - - span = self.start_list_run_steps_span(server_address=server_address, run_id=run_id, thread_id=thread_id) - - return _InstrumentedItemPaged(function(*args, **kwargs), self.add_run_step_event, span) - - def trace_list_run_steps_async(self, function, *args, **kwargs): - # Note that this method is not async, but it operates on AsyncIterable. - server_address = self.get_server_address_from_arg(args[0]) - run_id = kwargs.get("run_id") - thread_id = kwargs.get("thread_id") - - span = self.start_list_run_steps_span(server_address=server_address, run_id=run_id, thread_id=thread_id) - - return _AsyncInstrumentedItemPaged(function(*args, **kwargs), self.add_run_step_event, span) - - def handle_run_stream_exit(self, _function, *args, **kwargs): - agent_run_stream = args[0] - exc_type = kwargs.get("exc_type") - exc_val = kwargs.get("exc_val") - exc_tb = kwargs.get("exc_tb") - # TODO: is it a good idea? 
- # if not, we'll need to wrap stream and call exit - if ( - agent_run_stream.event_handler - and agent_run_stream.event_handler.__class__.__name__ == "_AgentEventHandlerTraceWrapper" - ): - agent_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) - elif ( - agent_run_stream.event_handler - and agent_run_stream.event_handler.__class__.__name__ == "_AsyncAgentEventHandlerTraceWrapper" - ): - agent_run_stream.event_handler.__aexit__(exc_type, exc_val, exc_tb) - - def wrap_handler( - self, - handler: "Optional[AgentEventHandler]" = None, - span: "Optional[AbstractSpan]" = None, - ) -> "Optional[AgentEventHandler]": - # Do not create a handler wrapper if we do not have handler in the first place. - if not handler: - return None - - if isinstance(handler, _AgentEventHandlerTraceWrapper): - return handler - - if span and span.span_instance.is_recording: - return _AgentEventHandlerTraceWrapper(self, span, handler) - - return handler - - def wrap_async_handler( - self, - handler: "Optional[AsyncAgentEventHandler]" = None, - span: "Optional[AbstractSpan]" = None, - ) -> "Optional[AsyncAgentEventHandler]": - # Do not create a handler wrapper if we do not have handler in the first place. 
- if not handler: - return None - - if isinstance(handler, _AsyncAgentEventHandlerTraceWrapper): - return handler - - if span and span.span_instance.is_recording: - return _AsyncAgentEventHandlerTraceWrapper(self, span, handler) - - return handler - - def start_create_message_span( - self, - server_address: Optional[str] = None, - thread_id: Optional[str] = None, - content: Optional[str] = None, - role: Optional[Union[str, MessageRole]] = None, - attachments: Optional[List[MessageAttachment]] = None, - ) -> "Optional[AbstractSpan]": - role_str = self._get_role(role) - span = start_span(OperationName.CREATE_MESSAGE, server_address, thread_id=thread_id) - if span and span.span_instance.is_recording: - self._add_message_event(span, role_str, content, attachments=attachments, thread_id=thread_id) - return span - - def _trace_sync_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to a synchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. 
- :rtype: Callable - """ - - @functools.wraps(function) - def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - - if class_function_name.startswith("AgentsClient.create_agent"): - kwargs.setdefault("merge_span", True) - return self.trace_create_agent(function, *args, **kwargs) - if class_function_name.startswith("ThreadsOperations.create"): - kwargs.setdefault("merge_span", True) - return self.trace_create_thread(function, *args, **kwargs) - if class_function_name.startswith("MessagesOperations.create"): - kwargs.setdefault("merge_span", True) - return self.trace_create_message(function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.create"): - kwargs.setdefault("merge_span", True) - return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.get"): - kwargs.setdefault("merge_span", True) - return self.trace_get_run(OperationName.GET_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.create_and_process"): - kwargs.setdefault("merge_span", True) - return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.submit_tool_outputs"): - kwargs.setdefault("merge_span", True) - return self.trace_submit_tool_outputs(False, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.submit_tool_outputs_stream"): - kwargs.setdefault("merge_span", True) - return self.trace_submit_tool_outputs(True, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations._handle_submit_tool_outputs"): - return self.trace_handle_submit_tool_outputs(function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.stream"): - 
kwargs.setdefault("merge_span", True) - return self.trace_create_stream(function, *args, **kwargs) - if class_function_name.startswith("MessagesOperations.list"): - kwargs.setdefault("merge_span", True) - return self.trace_list_messages(function, *args, **kwargs) - if class_function_name.startswith("RunStepsOperations.list"): - return self.trace_list_run_steps(function, *args, **kwargs) - if class_function_name.startswith("AgentRunStream.__exit__"): - return self.handle_run_stream_exit(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _trace_async_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to an asynchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. 
- :rtype: Callable - """ - - @functools.wraps(function) - async def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - - if class_function_name.startswith("AgentsClient.create_agent"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_agent_async(function, *args, **kwargs) - if class_function_name.startswith("ThreadsOperations.create"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_thread_async(function, *args, **kwargs) - if class_function_name.startswith("MessagesOperations.create"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_message_async(function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.create"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.get"): - kwargs.setdefault("merge_span", True) - return await self.trace_get_run_async(OperationName.GET_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.create_and_process"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.submit_tool_outputs"): - kwargs.setdefault("merge_span", True) - return await self.trace_submit_tool_outputs_async(False, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.submit_tool_outputs_stream"): - kwargs.setdefault("merge_span", True) - return await self.trace_submit_tool_outputs_async(True, function, *args, **kwargs) - if class_function_name.startswith("RunsOperations._handle_submit_tool_outputs"): - return await 
self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs) - if class_function_name.startswith("RunsOperations.stream"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_stream_async(function, *args, **kwargs) - if class_function_name.startswith("AsyncAgentRunStream.__aexit__"): - return self.handle_run_stream_exit(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _trace_async_list_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to an asynchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. 
- :rtype: Callable - """ - - @functools.wraps(function) - def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - if class_function_name.startswith("MessagesOperations.list"): - kwargs.setdefault("merge_span", True) - return self.trace_list_messages_async(function, *args, **kwargs) - if class_function_name.startswith("RunStepsOperations.list"): - kwargs.setdefault("merge_span", True) - return self.trace_list_run_steps_async(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _inject_async(self, f, _trace_type, _name): - if _name.startswith("list"): - wrapper_fun = self._trace_async_list_function(f) - else: - wrapper_fun = self._trace_async_function(f) - wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - return wrapper_fun - - def _inject_sync(self, f, _trace_type, _name): - wrapper_fun = self._trace_sync_function(f) - wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - return wrapper_fun - - def _agents_apis(self): - sync_apis = ( - ( - "azure.ai.agents", - "AgentsClient", - "create_agent", - TraceType.AGENTS, - "agent_create", - ), - ( - "azure.ai.agents.operations", - "ThreadsOperations", - "create", - TraceType.AGENTS, - "thread_create", - ), - ( - "azure.ai.agents.operations", - "MessagesOperations", - "create", - TraceType.AGENTS, - "message_create", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "create", - TraceType.AGENTS, - "create_run", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "get", - TraceType.AGENTS, - "get_run", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "create_and_process", - 
TraceType.AGENTS, - "create_and_process", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "submit_tool_outputs", - TraceType.AGENTS, - "submit_tool_outputs", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "submit_tool_outputs_stream", - TraceType.AGENTS, - "submit_tool_outputs_stream", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "_handle_submit_tool_outputs", - TraceType.AGENTS, - "_handle_submit_tool_outputs", - ), - ( - "azure.ai.agents.operations", - "RunsOperations", - "stream", - TraceType.AGENTS, - "stream", - ), - # Switching off the instrumentation for list method as it requires - # monkey patching inside pageable class. - ( - "azure.ai.agents.operations", - "MessagesOperations", - "list", - TraceType.AGENTS, - "list_messages", - ), - ( - "azure.ai.agents.operations", - "RunStepsOperations", - "list", - TraceType.AGENTS, - "list_run_steps", - ), - ( - "azure.ai.agents.models", - "AgentRunStream", - "__exit__", - TraceType.AGENTS, - "__exit__", - ), - ) - async_apis = ( - ( - "azure.ai.agents.aio", - "AgentsClient", - "create_agent", - TraceType.AGENTS, - "agent_create", - ), - ( - "azure.ai.agents.aio.operations", - "ThreadsOperations", - "create", - TraceType.AGENTS, - "agents_thread_create", - ), - ( - "azure.ai.agents.aio.operations", - "MessagesOperations", - "create", - TraceType.AGENTS, - "agents_thread_message_create", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "create", - TraceType.AGENTS, - "create_run", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "get", - TraceType.AGENTS, - "get_run", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "create_and_process", - TraceType.AGENTS, - "create_and_process_run", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "submit_tool_outputs", - TraceType.AGENTS, - "submit_tool_outputs", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "submit_tool_outputs_stream", - 
TraceType.AGENTS, - "submit_tool_outputs_stream", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "_handle_submit_tool_outputs", - TraceType.AGENTS, - "_handle_submit_tool_outputs", - ), - ( - "azure.ai.agents.aio.operations", - "RunsOperations", - "stream", - TraceType.AGENTS, - "stream", - ), - # Switching off the instrumentation for list method as it requires - # monkey patching inside async pageable class. - ( - "azure.ai.agents.aio.operations", - "MessagesOperations", - "list", - TraceType.AGENTS, - "list_messages", - ), - ( - "azure.ai.agents.aio.operations", - "RunStepsOperations", - "list", - TraceType.AGENTS, - "list_run_steps", - ), - ( - "azure.ai.agents.models", - "AsyncAgentRunStream", - "__aexit__", - TraceType.AGENTS, - "__aexit__", - ), - ) - return sync_apis, async_apis - - def _agents_api_list(self): - sync_apis, async_apis = self._agents_apis() - yield sync_apis, self._inject_sync - yield async_apis, self._inject_async - - def _generate_api_and_injector(self, apis): - for api, injector in apis: - for module_name, class_name, method_name, trace_type, name in api: - try: - module = importlib.import_module(module_name) - api = getattr(module, class_name) - if hasattr(api, method_name): - # The function list is sync in both sync and async classes. - yield api, method_name, trace_type, injector, name - except AttributeError as e: - # Log the attribute exception with the missing class information - logger.warning( - "AttributeError: The module '%s' does not have the class '%s'. %s", - module_name, - class_name, - str(e), - ) - except Exception as e: # pylint: disable=broad-except - # Log other exceptions as a warning, as we are not sure what they might be - logger.warning("An unexpected error occurred: '%s'", str(e)) - - def _available_agents_apis_and_injectors(self): - """ - Generates a sequence of tuples containing Agents API classes, method names, and - corresponding injector functions. - - :return: A generator yielding tuples. 
- :rtype: tuple - """ - yield from self._generate_api_and_injector(self._agents_api_list()) - - def _instrument_agents(self, enable_content_tracing: bool = False): - """This function modifies the methods of the Agents API classes to - inject logic before calling the original methods. - The original methods are stored as _original attributes of the methods. - - :param enable_content_tracing: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. - :type enable_content_tracing: bool - """ - # pylint: disable=W0603 - global _agents_traces_enabled - global _trace_agents_content - if _agents_traces_enabled: - raise RuntimeError("Traces already started for AI Agents") - _agents_traces_enabled = True - _trace_agents_content = enable_content_tracing - for ( - api, - method, - trace_type, - injector, - name, - ) in self._available_agents_apis_and_injectors(): - # Check if the method of the api class has already been modified - if not hasattr(getattr(api, method), "_original"): - setattr(api, method, injector(getattr(api, method), trace_type, name)) - - def _uninstrument_agents(self): - """This function restores the original methods of the Agents API classes - by assigning them back from the _original attributes of the modified methods. - """ - # pylint: disable=W0603 - global _agents_traces_enabled - global _trace_agents_content - _trace_agents_content = False - for api, method, _, _, _ in self._available_agents_apis_and_injectors(): - if hasattr(getattr(api, method), "_original"): - setattr(api, method, getattr(getattr(api, method), "_original")) - _agents_traces_enabled = False - - def _is_instrumented(self): - """This function returns True if Agents API has already been instrumented - for tracing and False if it has not been instrumented. - - :return: A value indicating whether the Agents API is currently instrumented or not. 
- :rtype: bool - """ - return _agents_traces_enabled - - def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: - """This function sets the content recording value. - - :param enable_content_recording: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. - :type enable_content_recording: bool - """ - global _trace_agents_content # pylint: disable=W0603 - _trace_agents_content = enable_content_recording - - def _is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content tracing is enabled. - :rtype bool - """ - return _trace_agents_content - - def record_error(self, span, exc): - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - -class _AgentEventHandlerTraceWrapper(AgentEventHandler): - def __init__( - self, - instrumentor: _AIAgentsInstrumentorPreview, - span: "AbstractSpan", - inner_handler: Optional[AgentEventHandler] = None, - ): - super().__init__() - self.span = span - self.inner_handler = inner_handler - self.ended = False - self.last_run: Optional[ThreadRun] = None - self.last_message: Optional[ThreadMessage] = None - self.instrumentor = instrumentor - - def initialize( - self, - response_iterator, - submit_tool_outputs, - ) -> None: - self.submit_tool_outputs = submit_tool_outputs - if self.inner_handler: - self.inner_handler.initialize( - response_iterator=response_iterator, - 
submit_tool_outputs=submit_tool_outputs, - ) - - def __next__(self) -> Any: - if self.inner_handler: - event_bytes = self.inner_handler.__next_impl__() - return self._process_event(event_bytes.decode("utf-8")) - return None - - # pylint: disable=R1710 - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return self.inner_handler.on_message_delta(delta) # type: ignore - - def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] - retval = None - if self.inner_handler: - retval = self.inner_handler.on_thread_message(message) # type: ignore - - if message.status in {"completed", "incomplete"}: - self.last_message = message - - return retval # type: ignore - - def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] - retval = None - - if self.inner_handler: - retval = self.inner_handler.on_thread_run(run) # type: ignore - self.last_run = run - - return retval # type: ignore - - def on_run_step(self, step: "RunStep") -> None: # type: ignore[func-returns-value] - retval = None - if self.inner_handler: - retval = self.inner_handler.on_run_step(step) # type: ignore - - if ( - step.type == "tool_calls" - and isinstance(step.step_details, RunStepToolCallDetails) - and step.status == RunStepStatus.COMPLETED - ): - self.instrumentor._add_tool_assistant_message_event( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - self.span, step - ) - elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: - self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) - if ( - self.span - and self.span.span_instance.is_recording - and isinstance(step.step_details, RunStepMessageCreationDetails) - ): - self.span.add_attribute(GEN_AI_MESSAGE_ID, step.step_details.message_creation.message_id) - - self.last_message = None - - return retval # type: 
ignore - - def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return self.inner_handler.on_run_step_delta(delta) # type: ignore - - def on_error(self, data: str) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return self.inner_handler.on_error(data) # type: ignore - - def on_done(self) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return self.inner_handler.on_done() # type: ignore - # it could be called multiple tines (for each step) __exit__ - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return self.inner_handler.on_unhandled_event(event_type, event_data) # type: ignore - - # pylint: enable=R1710 - - def __exit__(self, exc_type, exc_val, exc_tb): - if not self.ended: - self.ended = True - self.instrumentor.set_end_run(self.span, self.last_run) - - if self.last_run and self.last_run.last_error: - self.span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - self.last_run.last_error.message, - ) - self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) - - self.span.__exit__(exc_type, exc_val, exc_tb) - self.span.finish() - - -class _AsyncAgentEventHandlerTraceWrapper(AsyncAgentEventHandler): - def __init__( - self, - instrumentor: _AIAgentsInstrumentorPreview, - span: "AbstractSpan", - inner_handler: Optional[AsyncAgentEventHandler] = None, - ): - super().__init__() - self.span = span - self.inner_handler = inner_handler - self.ended = False - self.last_run: Optional[ThreadRun] = None - self.last_message: Optional[ThreadMessage] = None - self.instrumentor = instrumentor - - def initialize( - self, - response_iterator, - submit_tool_outputs, - ) -> None: - self.submit_tool_outputs = submit_tool_outputs - if self.inner_handler: - self.inner_handler.initialize( - response_iterator=response_iterator, 
- submit_tool_outputs=submit_tool_outputs, - ) - - # cspell:disable-next-line - async def __anext__(self) -> Any: - if self.inner_handler: - # cspell:disable-next-line - event_bytes = await self.inner_handler.__anext_impl__() - return await self._process_event(event_bytes.decode("utf-8")) - - # pylint: disable=R1710 - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return await self.inner_handler.on_message_delta(delta) # type: ignore - - async def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] - retval = None - if self.inner_handler: - retval = await self.inner_handler.on_thread_message(message) # type: ignore - - if message.status in {"completed", "incomplete"}: - self.last_message = message - - return retval # type: ignore - - async def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] - retval = None - - if self.inner_handler: - retval = await self.inner_handler.on_thread_run(run) # type: ignore - self.last_run = run - - return retval # type: ignore - - async def on_run_step(self, step: "RunStep") -> None: # type: ignore[func-returns-value] - retval = None - if self.inner_handler: - retval = await self.inner_handler.on_run_step(step) # type: ignore - - if ( - step.type == "tool_calls" - and isinstance(step.step_details, RunStepToolCallDetails) - and step.status == RunStepStatus.COMPLETED - ): - self.instrumentor._add_tool_assistant_message_event( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - self.span, step - ) - elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: - self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) - if ( - self.span - and self.span.span_instance.is_recording - and isinstance(step.step_details, RunStepMessageCreationDetails) - ): - 
self.span.add_attribute(GEN_AI_MESSAGE_ID, step.step_details.message_creation.message_id) - - self.last_message = None - - return retval # type: ignore - - async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return await self.inner_handler.on_run_step_delta(delta) # type: ignore - - async def on_error(self, data: str) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return await self.inner_handler.on_error(data) # type: ignore - - async def on_done(self) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return await self.inner_handler.on_done() # type: ignore - # it could be called multiple tines (for each step) __exit__ - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - return await self.inner_handler.on_unhandled_event(event_type, event_data) # type: ignore - - # pylint: enable=R1710 - - def __aexit__(self, exc_type, exc_val, exc_tb): - if not self.ended: - self.ended = True - self.instrumentor.set_end_run(self.span, self.last_run) - - self.span.__exit__(exc_type, exc_val, exc_tb) - self.span.finish() diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_instrument_paged_wrappers.py b/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_instrument_paged_wrappers.py deleted file mode 100644 index e5a3ad2cd0e2..000000000000 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_instrument_paged_wrappers.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -from typing import Any, Callable, AsyncIterator, Iterator, Optional, cast -from types import TracebackType - -from opentelemetry.trace import Span, StatusCode - -from azure.core.tracing._abstract_span import AbstractSpan -from azure.core.async_paging import AsyncItemPaged -from azure.core.paging import ItemPaged - - -class _SpanLogger: - """The class, providing safe logging of events to the span""" - - def log_to_span_safe( - self, - val: Any, - instrumentation_fun: Callable[[AbstractSpan, Any], None], - span: AbstractSpan, - ) -> None: - """ - Log value to the span if span exists. - - :param val: The value to be logged. - :type val: Any - :param instrumentation_fun: The function to be used to log val. - :type instrumentation_fun: Callable[[AbstractSpan, Any], None] - :param span: The span to be used for logging. Span must be opened before calling this method. - :type span: AbstractSpan - """ - try: - instrumentation_fun(span, val) - - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - span.add_attribute("error.type", error_type) - raise - - -class _AsyncInstrumentedItemPaged(AsyncItemPaged, _SpanLogger): - """The list class to mimic the AsyncPageable returned by a list.""" - - def __init__( - self, - async_iter: AsyncItemPaged, - instrumentation_fun: Callable[[AbstractSpan, Any], None], - span: Optional[AbstractSpan], - ) -> None: - super().__init__() - self._iter = async_iter - self._inst_fun = instrumentation_fun - self._span = span - self._gen: Optional[AsyncIterator[Any]] = None - - def __getattr__(self, name: str) -> 
Any: - """ - Delegate every attribute we do not override to the wrapped object - - :param name: The name of the attribute to get. - :type name: str - - :return: The value of the attribute. - :rtype: Any - """ - return getattr(self._iter, name) - - def __aiter__(self) -> AsyncIterator[Any]: - async def _gen() -> AsyncIterator[Any]: - try: - async for val in self._iter: - if self._span is not None: - self.log_to_span_safe(val, self._inst_fun, self._span) - yield val - finally: - if self._span is not None: - # We cast None to TracebackType, because traceback is - # not used in the downstream code. - self._span.__exit__(None, None, cast(TracebackType, None)) - - if self._gen is None: - if self._span is not None: - self._span.__enter__() - self._gen = _gen() - return self._gen - - -class _InstrumentedItemPaged(ItemPaged, _SpanLogger): - """The list class to mimic the Pageable returned by a list.""" - - def __init__( - self, - iter_val: ItemPaged, - instrumentation_fun: Callable[[AbstractSpan, Any], None], - span: Optional[AbstractSpan], - ) -> None: - super().__init__() - self._iter = iter_val - self._inst_fun = instrumentation_fun - self._span = span - self._gen: Optional[Iterator[Any]] = None - - def __getattr__(self, name: str) -> Any: - """ - Delegate every attribute we do not override to the wrapped object - - :param name: The name of the attribute to get. - :type name: str - - :return: The value of the attribute. - :rtype: Any - """ - return getattr(self._iter, name) - - def __iter__(self) -> Iterator[Any]: - def _gen() -> Iterator[Any]: - try: - for val in self._iter: - if self._span is not None: - self.log_to_span_safe(val, self._inst_fun, self._span) - yield val - finally: - if self._span is not None: - # We cast None to TracebackType, because traceback is - # not used in the downstream code. 
- self._span.__exit__(None, None, cast(TracebackType, None)) - - if self._gen is None: - if self._span is not None: - self._span.__enter__() - self._gen = _gen() - return self._gen diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_trace_function.py b/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_trace_function.py deleted file mode 100644 index 0ac5ea43c13f..000000000000 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_trace_function.py +++ /dev/null @@ -1,204 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import functools -import asyncio # pylint: disable = do-not-import-asyncio -from typing import Any, Callable, Optional, Dict - -try: - # pylint: disable = no-name-in-module - from opentelemetry import trace as opentelemetry_trace - - tracer = opentelemetry_trace.get_tracer(__name__) - _tracing_library_available = True -except ModuleNotFoundError: - _tracing_library_available = False - -if _tracing_library_available: - - def trace_function(span_name: Optional[str] = None): - """ - A decorator for tracing function calls using OpenTelemetry. - - This decorator handles various data types for function parameters and return values, - and records them as attributes in the trace span. The supported data types include: - - Basic data types: str, int, float, bool - - Collections: list, dict, tuple, set - - Special handling for collections: - - If a collection (list, dict, tuple, set) contains nested collections, the entire collection - is converted to a string before being recorded as an attribute. - - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. - - Object types are omitted, and the corresponding parameter is not traced. - - :param span_name: The name of the span. If not provided, the function name is used. 
- :type span_name: Optional[str] - :return: The decorated function with tracing enabled. - :rtype: Callable - """ - - def decorator(func: Callable) -> Callable: - @functools.wraps(func) - async def async_wrapper(*args: Any, **kwargs: Any) -> Any: - """ - Wrapper function for asynchronous functions. - - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: The result of the decorated asynchronous function. - :rtype: Any - """ - name = span_name if span_name else func.__name__ - with tracer.start_as_current_span(name) as span: - try: - # Sanitize parameters and set them as attributes - sanitized_params = sanitize_parameters(func, *args, **kwargs) - span.set_attributes(sanitized_params) - result = await func(*args, **kwargs) - sanitized_result = sanitize_for_attributes(result) - if sanitized_result is not None: - if isinstance(sanitized_result, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): - sanitized_result = str(sanitized_result) - span.set_attribute("code.function.return.value", sanitized_result) # type: ignore - return result - except Exception as e: - span.record_exception(e) - span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore - raise - - @functools.wraps(func) - def sync_wrapper(*args: Any, **kwargs: Any) -> Any: - """ - Wrapper function for synchronous functions. - - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: The result of the decorated synchronous function. 
- :rtype: Any - """ - name = span_name if span_name else func.__name__ - with tracer.start_as_current_span(name) as span: - try: - # Sanitize parameters and set them as attributes - sanitized_params = sanitize_parameters(func, *args, **kwargs) - span.set_attributes(sanitized_params) - result = func(*args, **kwargs) - sanitized_result = sanitize_for_attributes(result) - if sanitized_result is not None: - if isinstance(sanitized_result, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): - sanitized_result = str(sanitized_result) - span.set_attribute("code.function.return.value", sanitized_result) # type: ignore - return result - except Exception as e: - span.record_exception(e) - span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore - raise - - # Determine if the function is async - if asyncio.iscoroutinefunction(func): - return async_wrapper - return sync_wrapper - - return decorator - -else: - # Define a no-op decorator if OpenTelemetry is not available - def trace_function(span_name: Optional[str] = None): # pylint: disable=unused-argument - """ - A no-op decorator for tracing function calls when OpenTelemetry is not available. - - :param span_name: Not used in this version. - :type span_name: Optional[str] - :return: The original function. - :rtype: Callable - """ - - def decorator(func: Callable) -> Callable: - return func - - return decorator - - -def sanitize_parameters(func, *args, **kwargs) -> Dict[str, Any]: - """ - Sanitize function parameters to include only built-in data types. - - :param func: The function being decorated. - :type func: Callable - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: A dictionary of sanitized parameters. 
- :rtype: Dict[str, Any] - """ - import inspect - - params = inspect.signature(func).parameters - sanitized_params = {} - - for i, (name, param) in enumerate(params.items()): - if param.default == inspect.Parameter.empty and i < len(args): - value = args[i] - else: - value = kwargs.get(name, param.default) - - sanitized_value = sanitize_for_attributes(value) - # Check if the collection has nested collections - if isinstance(sanitized_value, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_value): - sanitized_value = str(sanitized_value) - if sanitized_value is not None: - sanitized_params["code.function.parameter." + name] = sanitized_value - - return sanitized_params - - -# pylint: disable=R0911 -def sanitize_for_attributes(value: Any, is_recursive: bool = False) -> Any: - """ - Sanitize a value to be used as an attribute. - - :param value: The value to sanitize. - :type value: Any - :param is_recursive: Indicates if the function is being called recursively. Default is False. - :type is_recursive: bool - :return: The sanitized value or None if the value is not a supported type. 
- :rtype: Any - """ - if isinstance(value, (str, int, float, bool)): - return value - if isinstance(value, list): - return [ - sanitize_for_attributes(item, True) - for item in value - if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) - ] - if isinstance(value, dict): - retval = { - k: sanitize_for_attributes(v, True) - for k, v in value.items() - if isinstance(v, (str, int, float, bool, list, dict, tuple, set)) - } - # dict to compatible with span attribute, so return it as a string - if is_recursive: - return retval - return str(retval) - if isinstance(value, tuple): - return tuple( - sanitize_for_attributes(item, True) - for item in value - if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) - ) - if isinstance(value, set): - retval_set = { - sanitize_for_attributes(item, True) - for item in value - if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) - } - if is_recursive: - return retval_set - return str(retval_set) - return None diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_utils.py b/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_utils.py deleted file mode 100644 index 10c6f252a3e5..000000000000 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/telemetry/_utils.py +++ /dev/null @@ -1,155 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -from typing import Optional -import logging -from enum import Enum - -from azure.core.tracing import AbstractSpan, SpanKind # type: ignore -from azure.core.settings import settings # type: ignore - -try: - from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import - - _span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable -except ModuleNotFoundError: - _span_impl_type = None - -logger = logging.getLogger(__name__) - - -GEN_AI_MESSAGE_ID = "gen_ai.message.id" -GEN_AI_MESSAGE_STATUS = "gen_ai.message.status" -GEN_AI_THREAD_ID = "gen_ai.thread.id" -GEN_AI_THREAD_RUN_ID = "gen_ai.thread.run.id" -GEN_AI_AGENT_ID = "gen_ai.agent.id" -GEN_AI_AGENT_NAME = "gen_ai.agent.name" -GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description" -GEN_AI_OPERATION_NAME = "gen_ai.operation.name" -GEN_AI_THREAD_RUN_STATUS = "gen_ai.thread.run.status" -GEN_AI_REQUEST_MODEL = "gen_ai.request.model" -GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" -GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" -GEN_AI_REQUEST_MAX_INPUT_TOKENS = "gen_ai.request.max_input_tokens" -GEN_AI_REQUEST_MAX_OUTPUT_TOKENS = "gen_ai.request.max_output_tokens" -GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" -GEN_AI_SYSTEM = "gen_ai.system" -SERVER_ADDRESS = "server.address" -AZ_AI_AGENT_SYSTEM = "az.ai.agents" -GEN_AI_TOOL_NAME = "gen_ai.tool.name" -GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id" -GEN_AI_REQUEST_RESPONSE_FORMAT = "gen_ai.request.response_format" -GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens" -GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" -GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message" -GEN_AI_EVENT_CONTENT = "gen_ai.event.content" -GEN_AI_RUN_STEP_START_TIMESTAMP = "gen_ai.run_step.start.timestamp" -GEN_AI_RUN_STEP_END_TIMESTAMP = "gen_ai.run_step.end.timestamp" -GEN_AI_RUN_STEP_STATUS = "gen_ai.run_step.status" -ERROR_TYPE = "error.type" -ERROR_MESSAGE = "error.message" - - 
-class OperationName(Enum): - CREATE_AGENT = "create_agent" - CREATE_THREAD = "create_thread" - CREATE_MESSAGE = "create_message" - START_THREAD_RUN = "start_thread_run" - GET_THREAD_RUN = "get_thread_run" - EXECUTE_TOOL = "execute_tool" - LIST_MESSAGES = "list_messages" - LIST_RUN_STEPS = "list_run_steps" - SUBMIT_TOOL_OUTPUTS = "submit_tool_outputs" - PROCESS_THREAD_RUN = "process_thread_run" - - -def trace_tool_execution( - tool_call_id: str, - tool_name: str, - thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow - agent_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow - run_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow -) -> "Optional[AbstractSpan]": - span = start_span( - OperationName.EXECUTE_TOOL, - server_address=None, - span_name=f"execute_tool {tool_name}", - thread_id=thread_id, - agent_id=agent_id, - run_id=run_id, - gen_ai_system=None, - ) # it's a client code execution, not GenAI span - if span is not None and span.span_instance.is_recording: - span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id) - span.add_attribute(GEN_AI_TOOL_NAME, tool_name) - - return span - - -def start_span( - operation_name: OperationName, - server_address: Optional[str], - span_name: Optional[str] = None, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - run_id: Optional[str] = None, - model: Optional[str] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - response_format: Optional[str] = None, - gen_ai_system: Optional[str] = AZ_AI_AGENT_SYSTEM, - kind: SpanKind = SpanKind.CLIENT, -) -> "Optional[AbstractSpan]": - global _span_impl_type # pylint: disable=global-statement - if _span_impl_type is None: - # Try to reinitialize the span implementation type. 
- # This is a workaround for the case when the tracing implementation is not set up yet when the agent telemetry is imported. - # This code should not even get called if settings.tracing_implementation() returns None since that is also checked in - # _trace_sync_function and _trace_async_function functions in the AIAgentsInstrumentor. - _span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable - if _span_impl_type is None: - return None - - span = _span_impl_type(name=span_name or operation_name.value, kind=kind) - - if span and span.span_instance.is_recording: - if gen_ai_system: - span.add_attribute(GEN_AI_SYSTEM, AZ_AI_AGENT_SYSTEM) - - span.add_attribute(GEN_AI_OPERATION_NAME, operation_name.value) - - if server_address: - span.add_attribute(SERVER_ADDRESS, server_address) - - if thread_id: - span.add_attribute(GEN_AI_THREAD_ID, thread_id) - - if agent_id: - span.add_attribute(GEN_AI_AGENT_ID, agent_id) - - if run_id: - span.add_attribute(GEN_AI_THREAD_RUN_ID, run_id) - - if model: - span.add_attribute(GEN_AI_REQUEST_MODEL, model) - - if temperature: - span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, str(temperature)) - - if top_p: - span.add_attribute(GEN_AI_REQUEST_TOP_P, str(top_p)) - - if max_prompt_tokens: - span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens) - - if max_completion_tokens: - span.add_attribute(GEN_AI_REQUEST_MAX_OUTPUT_TOKENS, max_completion_tokens) - - if response_format: - span.add_attribute(GEN_AI_REQUEST_RESPONSE_FORMAT, response_format) - - return span diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py index 55efabc82f39..8ba54dc149eb 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py +++ b/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py @@ -27,6 +27,7 @@ def 
enable_azure_monitor_tracing(self): configure_azure_monitor(connection_string=application_insights_connection_string) try: from azure.ai.agents.telemetry import AIAgentsInstrumentor + agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() @@ -49,11 +50,12 @@ def enable_console_tracing_with_agent(self): tracer = trace.get_tracer(__name__) try: from azure.ai.agents.telemetry import AIAgentsInstrumentor + agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() except Exception as exc: # pylint: disable=broad-exception-caught - print(f"Could not call `AIAgentsInstrumentor().instrument()`. Exception: {exc}") + print(f"Could not call `AIAgentsInstrumentor().instrument()`. Exception: {exc}") print("Console tracing enabled with agent traces.") def display_menu(self): diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py index 4b56849013f4..09da43a6ca7c 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py @@ -54,6 +54,7 @@ async def main() -> None: try: from azure.ai.agents.telemetry import AIAgentsInstrumentor + agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py index d83dc2bac066..517cb4820dfd 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py +++ 
b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py @@ -48,6 +48,7 @@ try: from azure.ai.agents.telemetry import AIAgentsInstrumentor + agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py index 5ffc4edd99da..984aa21910e3 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -84,6 +84,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: try: from azure.ai.agents.telemetry import AIAgentsInstrumentor + agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py index 68bae9d2cef0..9b5711752952 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py @@ -52,6 +52,7 @@ try: from azure.ai.agents.telemetry import AIAgentsInstrumentor + agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() @@ -106,7 +107,7 @@ def fetch_weather(location: str) -> str: model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent", - toolset=toolset + toolset=toolset, ) print(f"Created 
agent, ID: {agent.id}") diff --git a/sdk/ai/azure-ai-agents/sdk_packaging.toml b/sdk/ai/azure-ai-agents/sdk_packaging.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/ai/azure-ai-agents/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/azure/__init__.py b/sdk/ai/azure-ai-agents/servicepatterns/__init__.py similarity index 100% rename from sdk/ai/azure-ai-agents/azure/__init__.py rename to sdk/ai/azure-ai-agents/servicepatterns/__init__.py diff --git a/sdk/ai/azure-ai-agents/servicepatterns/models/__init__.py b/sdk/ai/azure-ai-agents/servicepatterns/models/__init__.py new file mode 100644 index 000000000000..98da37b9e98c --- /dev/null +++ b/sdk/ai/azure-ai-agents/servicepatterns/models/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + AgentV1Error, + AgentV1ErrorError, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AgentV1Error", + "AgentV1ErrorError", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-agents/servicepatterns/models/_models.py b/sdk/ai/azure-ai-agents/servicepatterns/models/_models.py new file mode 100644 index 000000000000..ce85adc3350d --- /dev/null +++ b/sdk/ai/azure-ai-agents/servicepatterns/models/_models.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +from typing import Any, Mapping, Optional, TYPE_CHECKING, overload + +from ...azure.ai.agents._utils.model_base import Model as _Model, rest_field + +if TYPE_CHECKING: + from .. import models as _models + + +class AgentV1Error(_Model): + """Error payload returned by the agents API. + + :ivar error: Represents the error. Required. + :vartype error: ~servicepatterns.models.AgentV1ErrorError + """ + + error: "_models.AgentV1ErrorError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Represents the error. 
Required.""" + + @overload + def __init__( + self, + *, + error: "_models.AgentV1ErrorError", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgentV1ErrorError(_Model): + """AgentV1ErrorError. + + :ivar message: + :vartype message: str + :ivar type: + :vartype type: str + :ivar param: + :vartype param: str + :ivar code: + :vartype code: str + """ + + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + param: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + + @overload + def __init__( + self, + *, + message: Optional[str] = None, + type: Optional[str] = None, + param: Optional[str] = None, + code: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) diff --git a/sdk/ai/azure-ai-agents/servicepatterns/models/_patch.py b/sdk/ai/azure-ai-agents/servicepatterns/models/_patch.py new file mode 100644 index 000000000000..8bcb627aa475 --- /dev/null +++ b/sdk/ai/azure-ai-agents/servicepatterns/models/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-agents/setup.py b/sdk/ai/azure-ai-agents/setup.py index d89a4f5cd3a7..c534c40525df 100644 --- a/sdk/ai/azure-ai-agents/setup.py +++ b/sdk/ai/azure-ai-agents/setup.py @@ -51,16 +51,6 @@ zip_safe=False, packages=find_packages( exclude=[ - "samples", - "samples.utils", - "samples.agents_tools", - "samples.agents_async", - "samples.agents_multiagent", - "samples.agents_streaming", - "samples.agents_telemetry", - "samples.agents_tools", - "samples.assets", - "samples.agents_async.utils", "tests", # Exclude packages that will be covered by PEP420 or nspkg "azure", diff --git a/sdk/ai/azure-ai-agents/tsp-location.yaml b/sdk/ai/azure-ai-agents/tsp-location.yaml index cf944b1f937d..24d72a945550 100644 --- a/sdk/ai/azure-ai-agents/tsp-location.yaml +++ b/sdk/ai/azure-ai-agents/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Agents -commit: 2f0af7bbb6340f5b3f89ab423267de352b834f7b +commit: d002441e30d6db280f92fe19b885cc68279c591c repo: Azure/azure-rest-api-specs additionalDirectories: