From a82b3c2dd6fe98d65686e92f4b1106ccfa9f6aa9 Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Wed, 23 Apr 2025 16:32:57 +0000 Subject: [PATCH] CodeGen from PR 34135 in Azure/azure-rest-api-specs Merge 2bf558dadf671935af2255088c8bf85768e4c98f into 700384de9c700a7b1f4b254c0f09b768a1f14987 --- sdk/ai/azure-ai-projects/_meta.json | 6 + .../azure/ai/projects/_client.py | 13 +- .../azure/ai/projects/_model_base.py | 27 +- .../azure/ai/projects/_patch.py | 299 +- .../azure/ai/projects/_serialization.py | 26 +- .../azure/ai/projects/_version.py | 2 +- .../azure/ai/projects/aio/_client.py | 13 +- .../azure/ai/projects/aio/_patch.py | 314 +- .../ai/projects/aio/operations/_operations.py | 681 ++-- .../ai/projects/aio/operations/_patch.py | 3219 +--------------- .../azure/ai/projects/models/_patch.py | 1942 +--------- .../ai/projects/operations/_operations.py | 683 ++-- .../azure/ai/projects/operations/_patch.py | 3368 +---------------- .../azure/ai/projects/prompts/__init__.py | 16 - .../azure/ai/projects/prompts/_patch.py | 124 - .../azure/ai/projects/prompts/_utils.py | 39 - .../azure/ai/projects/telemetry/__init__.py | 14 - .../ai/projects/telemetry/_trace_function.py | 204 - .../ai/projects/telemetry/agents/__init__.py | 13 - .../agents/_ai_agents_instrumentor.py | 1907 ---------- .../ai/projects/telemetry/agents/_utils.py | 139 - sdk/ai/azure-ai-projects/sdk_packaging.toml | 2 + sdk/ai/azure-ai-projects/setup.py | 52 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 24 files changed, 532 insertions(+), 12573 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/_meta.json delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/prompts/__init__.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_patch.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_utils.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py delete mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py create mode 100644 sdk/ai/azure-ai-projects/sdk_packaging.toml diff --git a/sdk/ai/azure-ai-projects/_meta.json b/sdk/ai/azure-ai-projects/_meta.json new file mode 100644 index 000000000000..e552d88bcfe2 --- /dev/null +++ b/sdk/ai/azure-ai-projects/_meta.json @@ -0,0 +1,6 @@ +{ + "commit": "d5463f01235f1f0f939738ecfd706026e084e8f3", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/ai/Azure.AI.Projects", + "@azure-tools/typespec-python": "0.43.0" +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py index 853feb83403d..de7c61d1b19b 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py @@ -72,6 +72,7 @@ def __init__( credential=credential, **kwargs ) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -119,16 +120,12 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": 
self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py index 3072ee252ed9..49d5c7259389 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py @@ -2,8 +2,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=protected-access, broad-except @@ -21,6 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET +from collections.abc import MutableMapping from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -28,11 +30,6 @@ from azure.core.pipeline import PipelineResponse from azure.core.serialization import _Null -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping - _LOGGER = logging.getLogger(__name__) __all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] @@ -347,7 +344,7 @@ def _get_model(module_name: str, model_name: str): _UNSET = object() -class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object +class _MyMutableMapping(MutableMapping[str, typing.Any]): def __init__(self, data: typing.Dict[str, typing.Any]) -> None: self._data = data @@ -407,13 +404,13 @@ def get(self, key: str, default: typing.Any = None) -> typing.Any: return default @typing.overload - def pop(self, key: str) -> typing.Any: ... + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ @typing.overload - def pop(self, key: str, default: _T) -> _T: ... + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: ... + def pop(self, key: str, default: typing.Any) -> typing.Any: ... 
# pylint: disable=signature-differs def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: """ @@ -443,7 +440,7 @@ def clear(self) -> None: """ self._data.clear() - def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ """ Updates D from mapping/iterable E and F. :param any args: Either a mapping object or an iterable of key-value pairs. @@ -454,7 +451,7 @@ def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: def setdefault(self, key: str, default: None = None) -> None: ... @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: """ @@ -644,7 +641,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) # pylint: disable=no-value-for-parameter + return super().__new__(cls) def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -680,7 +677,7 @@ def _deserialize(cls, data, exist_discriminators): discriminator_value = data.find(xml_name).text # pyright: ignore else: discriminator_value = data.get(discriminator._rest_name) - mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member return mapped_cls._deserialize(data, exist_discriminators) def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 06ceb9c67f05..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -1,300 +1,15 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import uuid -from os import PathLike -from pathlib import Path -from typing import Any, Dict, List, Tuple, Union, Optional -from typing_extensions import Self +from typing import List -from azure.core import PipelineClient -from azure.core.credentials import TokenCredential -from azure.core.pipeline import policies - -from ._client import AIProjectClient as ClientGenerated -from ._configuration import AIProjectClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations -from .operations._patch import InferenceOperations - - -class AIProjectClient( - ClientGenerated -): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes - def __init__( # pylint: disable=super-init-not-called,too-many-statements - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - # TODO: Validate input formats with regex match (e.g. subscription ID) - if not endpoint: - raise ValueError("endpoint is required") - if not subscription_id: - raise ValueError("subscription_id ID is required") - if not resource_group_name: - raise ValueError("resource_group_name is required") - if not project_name: - raise ValueError("project_name is required") - if not credential: - raise ValueError("credential is required") - if "api_version" in kwargs: - raise ValueError("No support for overriding the API version") - if "credential_scopes" in kwargs: - raise ValueError("No support for overriding the credential scopes") - - kwargs0 = kwargs.copy() - kwargs1 = kwargs.copy() - kwargs2 = kwargs.copy() - kwargs3 = kwargs.copy() - - self._user_agent: Optional[str] = kwargs.get("user_agent", None) - - # For getting AppInsights connection string from the AppInsights resource. - # The AppInsights resource URL is not known at this point. We need to get it from the - # AzureML "Workspace - Get" REST API call. 
It will have the form: - # https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} - _endpoint0 = "https://management.azure.com" - self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2020-02-02", - credential_scopes=["https://management.azure.com/.default"], - **kwargs0, - ) - - _policies0 = kwargs0.pop("policies", None) - if _policies0 is None: - _policies0 = [ - policies.RequestIdPolicy(**kwargs0), - self._config0.headers_policy, - self._config0.user_agent_policy, - self._config0.proxy_policy, - policies.ContentDecodePolicy(**kwargs0), - self._config0.redirect_policy, - self._config0.retry_policy, - self._config0.authentication_policy, - self._config0.custom_hook_policy, - self._config0.logging_policy, - policies.DistributedTracingPolicy(**kwargs0), - policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None, - self._config0.http_logging_policy, - ] - self._client0: PipelineClient = PipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) - - # For Endpoints operations (listing connections, getting connection properties, getting project properties) - _endpoint1 = ( - "https://management.azure.com/" - + f"subscriptions/{subscription_id}/" - + f"resourceGroups/{resource_group_name}/" - + "providers/Microsoft.MachineLearningServices/" - + f"workspaces/{project_name}" - ) - self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", - credential_scopes=["https://management.azure.com/.default"], - **kwargs1, - ) - _policies1 = kwargs1.pop("policies", None) - if _policies1 is None: - _policies1 = [ - policies.RequestIdPolicy(**kwargs1), - self._config1.headers_policy, - self._config1.user_agent_policy, - self._config1.proxy_policy, - policies.ContentDecodePolicy(**kwargs1), - self._config1.redirect_policy, - self._config1.retry_policy, - self._config1.authentication_policy, - self._config1.custom_hook_policy, - self._config1.logging_policy, - policies.DistributedTracingPolicy(**kwargs1), - policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, - self._config1.http_logging_policy, - ] - self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) - - # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config2 = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-12-01-preview", - credential_scopes=["https://ml.azure.com/.default"], - **kwargs2, - ) - _policies2 = kwargs2.pop("policies", None) - if _policies2 is None: - _policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, - self._config2.user_agent_policy, - self._config2.proxy_policy, - policies.ContentDecodePolicy(**kwargs2), - 
self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, - self._config2.http_logging_policy, - ] - self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) - - # For Cloud Evaluations operations - # cSpell:disable-next-line - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3 = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com/.default"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, - self._config3.http_logging_policy, - ] - self._client3: PipelineClient = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.telemetry = TelemetryOperations( - self._client0, self._config0, self._serialize, self._deserialize, outer_instance=self - ) - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = InferenceOperations(self) - - def close(self) -> None: - self._client0.close() - self._client1.close() - self._client2.close() - self._client3.close() - - def __enter__(self) -> Self: - self._client0.__enter__() - self._client1.__enter__() - self._client2.__enter__() - self._client3.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client0.__exit__(*exc_details) - self._client1.__exit__(*exc_details) - self._client2.__exit__(*exc_details) - self._client3.__exit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> Self: - """ - Create an AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. - :param TokenCredential credential: Credential used to authenticate requests to the service. - :return: An AIProjectClient instance. 
- :rtype: AIProjectClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - - def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - - ml_client = MLClient( - self._config3.credential, - self._config3.subscription_id, - self._config3.resource_group_name, - self._config3.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config3.subscription_id, - "resource_group_name": self._config3.resource_group_name, - "project_name": self._config3.project_name, - } - - -__all__: List[str] = [ - "AIProjectClient", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py index 7a0232de5ddc..eb86ea23c965 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py @@ -1,28 +1,10 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- -# # Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pyright: reportUnnecessaryTypeIgnoreComment=false diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py index b1c2836b6921..be71c81bd282 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b9" +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py index 7bfca0ef6b4a..16111c747201 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py @@ -72,6 +72,7 @@ def __init__( credential=credential, **kwargs ) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -121,16 +122,12 @@ def send_request( request_copy = deepcopy(request) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 6e36aa81fed5..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -1,315 +1,15 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import uuid -from os import PathLike -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Self +from typing import List -from azure.core import AsyncPipelineClient -from azure.core.pipeline import policies - -from .._serialization import Deserializer, Serializer -from ._client import AIProjectClient as ClientGenerated -from ._configuration import AIProjectClientConfiguration -from .operations import ( - AgentsOperations, - ConnectionsOperations, - EvaluationsOperations, - TelemetryOperations, -) -from .operations._patch import _SyncCredentialWrapper, InferenceOperations - -if TYPE_CHECKING: - from azure.core.credentials import AccessToken - from azure.core.credentials_async import AsyncTokenCredential - - -class AIProjectClient( - ClientGenerated -): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes - def __init__( # pylint: disable=super-init-not-called,too-many-statements - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - # TODO: Validate input formats with regex match (e.g. subscription ID) - if not endpoint: - raise ValueError("endpoint is required") - if not subscription_id: - raise ValueError("subscription_id ID is required") - if not resource_group_name: - raise ValueError("resource_group_name is required") - if not project_name: - raise ValueError("project_name is required") - if not credential: - raise ValueError("credential is required") - if "api_version" in kwargs: - raise ValueError("No support for overriding the API version") - if "credential_scopes" in kwargs: - raise ValueError("No support for overriding the credential scopes") - - kwargs0 = kwargs.copy() - kwargs1 = kwargs.copy() - kwargs2 = kwargs.copy() - kwargs3 = kwargs.copy() - - self._user_agent: Optional[str] = kwargs.get("user_agent", None) - - # For getting AppInsights connection string from the AppInsights resource. - # The AppInsights resource URL is not known at this point. We need to get it from the - # AzureML "Workspace - Get" REST API call. 
It will have the form: - # https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} # pylint: disable=line-too-long - _endpoint0 = "https://management.azure.com" # pylint: disable=line-too-long - self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2020-02-02", - credential_scopes=["https://management.azure.com/.default"], - **kwargs0, - ) - - _policies0 = kwargs0.pop("policies", None) - if _policies0 is None: - _policies0 = [ - policies.RequestIdPolicy(**kwargs0), - self._config0.headers_policy, - self._config0.user_agent_policy, - self._config0.proxy_policy, - policies.ContentDecodePolicy(**kwargs0), - self._config0.redirect_policy, - self._config0.retry_policy, - self._config0.authentication_policy, - self._config0.custom_hook_policy, - self._config0.logging_policy, - policies.DistributedTracingPolicy(**kwargs0), - (policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None), - self._config0.http_logging_policy, - ] - self._client0: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) - - # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" - self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", - credential_scopes=["https://management.azure.com/.default"], - **kwargs1, - ) - _policies1 = kwargs1.pop("policies", None) - if _policies1 is None: - _policies1 = [ - policies.RequestIdPolicy(**kwargs1), - self._config1.headers_policy, - self._config1.user_agent_policy, - self._config1.proxy_policy, - policies.ContentDecodePolicy(**kwargs1), - self._config1.redirect_policy, - self._config1.retry_policy, - self._config1.authentication_policy, - self._config1.custom_hook_policy, - self._config1.logging_policy, - policies.DistributedTracingPolicy(**kwargs1), - (policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None), - self._config1.http_logging_policy, - ] - self._client1: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) - - # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config2: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-12-01-preview", - credential_scopes=["https://ml.azure.com/.default"], - **kwargs2, - ) - _policies2 = kwargs2.pop("policies", None) - if _policies2 is None: - _policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, - self._config2.user_agent_policy, - self._config2.proxy_policy, - 
policies.ContentDecodePolicy(**kwargs2), - self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - (policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None), - self._config2.http_logging_policy, - ] - self._client2: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) - - # For Cloud Evaluations operations - # cSpell:disable-next-line - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com/.default"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - (policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None), - self._config3.http_logging_policy, - ] - self._client3: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.telemetry = TelemetryOperations( - self._client0, - self._config0, - self._serialize, - self._deserialize, - outer_instance=self, - ) - self._credential = credential - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = InferenceOperations(self) - - async def close(self) -> None: - await self._client0.close() - await self._client1.close() - await self._client2.close() - await self._client3.close() - - async def __aenter__(self) -> Self: - await self._client0.__aenter__() - await self._client1.__aenter__() - await self._client2.__aenter__() - await self._client3.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client0.__aexit__(*exc_details) - await self._client1.__aexit__(*exc_details) - await self._client2.__aexit__(*exc_details) - await self._client3.__aexit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: - """ - Create an asynchronous AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. - :param AsyncTokenCredential credential: Credential used to authenticate requests to the service. 
- :return: An AIProjectClient instance. - :rtype: AIProjectClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls( - endpoint, - subscription_id, - resource_group_name, - project_name, - credential, - **kwargs, - ) - - def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str, str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - # We have to wrap async method get_token of - - ml_client = MLClient( - _SyncCredentialWrapper(self._config3.credential), - self._config3.subscription_id, - self._config3.resource_group_name, - self._config3.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config3.subscription_id, - "resource_group_name": self._config3.resource_group_name, - "project_name": self._config3.project_name, - } - - -__all__: List[str] = [ - "AIProjectClient", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index b1f9ba8cb084..5a567bf38b88 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -6,9 +6,9 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase import json -import sys from typing import ( Any, AsyncIterable, @@ -105,14 +105,9 @@ ) from .._configuration import AIProjectClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore - if TYPE_CHECKING: from ... 
import _types -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +JSON = MutableMapping[str, Any] _Unset: Any = object() T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] @@ -336,16 +331,12 @@ async def create_agent( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -431,16 +422,12 @@ async def list_agents( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -500,16 +487,12 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -755,16 +738,12 @@ async def update_agent( params=_params, ) 
path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -824,16 +803,12 @@ async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDelet params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -993,16 +968,12 @@ async def create_thread( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1062,16 +1033,12 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1234,16 +1201,12 @@ async def update_thread( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1303,16 +1266,12 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1398,16 +1357,12 @@ async def list_threads( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", 
skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1594,16 +1549,12 @@ async def create_message( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1697,16 +1648,12 @@ async def list_messages( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1769,16 +1716,12 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } 
_request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1938,16 +1881,12 @@ async def update_message( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2312,16 +2251,12 @@ async def create_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2411,16 +2346,12 @@ async def list_runs( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2483,16 +2414,12 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - 
"self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2652,16 +2579,12 @@ async def update_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2833,16 +2756,12 @@ async def submit_tool_outputs_to_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2905,16 +2824,12 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": 
self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3231,16 +3146,12 @@ async def create_thread_and_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3320,16 +3231,12 @@ async def get_run_step( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3430,16 +3337,12 @@ async def list_run_steps( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": 
self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3503,16 +3406,12 @@ async def list_files( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3585,16 +3484,12 @@ async def _upload_file( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3654,16 +3549,12 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3723,16 +3614,12 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: params=_params, ) path_format_arguments = { - "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3792,16 +3679,12 @@ async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[ params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3884,16 +3767,12 @@ async def list_vector_stores( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4074,16 +3953,12 @@ async def create_vector_store( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + 
"subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4143,16 +4018,12 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4313,16 +4184,12 @@ async def modify_vector_store( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4383,16 +4250,12 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": 
self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4487,16 +4350,12 @@ async def list_vector_store_files( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4653,16 +4512,12 @@ async def create_vector_store_file( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4725,16 +4580,12 @@ async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwar params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -4802,16 +4653,12 @@ async def delete_vector_store_file( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4968,16 +4815,12 @@ async def create_vector_store_file_batch( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5042,16 +4885,12 @@ async def get_vector_store_file_batch( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5117,16 +4956,12 @@ async def cancel_vector_store_file_batch( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - 
"self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5225,16 +5060,12 @@ async def list_vector_store_file_batch_files( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5309,16 +5140,12 @@ async def _get_workspace(self, **kwargs: Any) -> _models._models.GetWorkspaceRes params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5396,16 +5223,12 @@ async def _list_connections( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), 
"resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5467,16 +5290,12 @@ async def _get_connection(self, connection_name: str, **kwargs: Any) -> _models. params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5572,16 +5391,12 @@ async def _get_connection_with_secrets( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5666,16 +5481,12 @@ async def _get_app_insights( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", 
self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5755,16 +5566,12 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5887,16 +5694,12 @@ async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], * params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5966,18 +5769,14 @@ def prepare_request(next_link=None): params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5995,18 +5794,14 @@ def prepare_request(next_link=None): "GET", 
urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6140,16 +5935,12 @@ async def update( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6215,16 +6006,12 @@ async def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSche params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6362,16 +6149,12 @@ async def create_or_replace_schedule( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6446,18 +6229,14 @@ def prepare_request(next_link=None): params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6475,18 +6254,14 @@ def prepare_request(next_link=None): "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6546,16 +6321,12 @@ async def disable_schedule(self, name: str, **kwargs: Any) -> None: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", 
skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index b292a194771e..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -1,3220 +1,15 @@ -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio # pylint: disable=do-not-import-asyncio -import concurrent.futures -import io -import logging -import os -import time -from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - AsyncIterator, - Dict, - List, - MutableMapping, - Optional, - Sequence, - TextIO, - Union, - cast, - Callable, - Set, - overload, -) +from typing import List -from azure.core.credentials import TokenCredential -from azure.core.exceptions import ResourceNotFoundError -from azure.core.tracing.decorator_async import distributed_trace_async - -from ... import models as _models -from ..._vendor import FileType -from ...models._enums import AuthenticationType, ConnectionType, FilePurpose, RunStatus -from ...models._models import ( - GetAppInsightsResponse, - GetConnectionResponse, - GetWorkspaceResponse, - InternalConnectionPropertiesSASAuth, - ListConnectionsResponse, -) -from ...models._patch import ConnectionProperties -from ...operations._patch import _enable_telemetry -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import TelemetryOperations as TelemetryOperationsGenerated - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from openai import AsyncAzureOpenAI - - from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient, ImageEmbeddingsClient - from azure.ai.projects import _types - from azure.core.credentials import AccessToken - from azure.core.credentials_async import AsyncTokenCredential - -logger = logging.getLogger(__name__) - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - - -class InferenceOperations: - - def __init__(self, outer_instance): - - # All returned inference clients will have this application id set on their user-agent. 
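The _operations.py hunks above all make the same mechanical change: skip_quote=True is dropped from every path-parameter serialization call, so the endpoint, subscription ID, resource group, and project name are now percent-encoded when the request URL is formatted. A minimal sketch of the behavioral difference, using urllib directly rather than the SDK's serializer (the value is illustrative):

    from urllib.parse import quote

    resource_group = "my rg"  # illustrative value containing a space

    # With skip_quote=True the raw value was substituted into the URL template as-is:
    print(resource_group)                  # my rg
    # Without skip_quote, the value is percent-encoded before substitution:
    print(quote(resource_group, safe=""))  # my%20rg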
- # For more info on user-agent HTTP header, see:
- # https://azure.github.io/azure-sdk/general_azurecore.html#telemetry-policy
- USER_AGENT_APP_ID = "AIProjectClient"
-
- if hasattr(outer_instance, "_user_agent") and outer_instance._user_agent:
- # If the calling application has set "user_agent" when constructing the AIProjectClient,
- # take that value and prepend it to USER_AGENT_APP_ID.
- self._user_agent = f"{outer_instance._user_agent}-{USER_AGENT_APP_ID}"
- else:
- self._user_agent = USER_AGENT_APP_ID
-
- self._outer_instance = outer_instance
-
- @distributed_trace_async
- async def get_chat_completions_client(
- self, *, connection_name: Optional[str] = None, **kwargs
- ) -> "ChatCompletionsClient":
- """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default
- Azure AI Services connected resource (if `connection_name` is not specified), or from the Azure AI
- Services resource given by its connection name. Keyword arguments are passed to the constructor of
- ChatCompletionsClient.
-
- At least one AI model that supports chat completions must be deployed in this resource.
-
- .. note:: The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
-
- :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project.
- Optional. If not provided, the default Azure AI Services connection will be used.
- :type connection_name: str
-
- :return: An authenticated chat completions client.
- :rtype: ~azure.ai.inference.ChatCompletionsClient
-
- :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection
- does not exist.
- :raises ModuleNotFoundError: if the `azure-ai-inference` package
- is not installed.
- :raises ValueError: if the connection name is an empty string.
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
-
- if connection_name is not None and not connection_name:
- raise ValueError("Connection name cannot be empty")
-
- # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on
- # a separate "Serverless" connection. This is now deprecated.
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true"
-
- if connection_name:
- connection = await self._outer_instance.connections.get(
- connection_name=connection_name, include_credentials=True
- )
- else:
- if use_serverless_connection:
- connection = await self._outer_instance.connections.get_default(
- connection_type=ConnectionType.SERVERLESS, include_credentials=True
- )
- else:
- connection = await self._outer_instance.connections.get_default(
- connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True
- )
-
- logger.debug("[InferenceOperations.get_chat_completions_client] connection = %s", str(connection))
-
- try:
- from azure.ai.inference.aio import ChatCompletionsClient
- except ModuleNotFoundError as e:
- raise ModuleNotFoundError(
- "Azure AI Inference SDK is not installed.
Please install it using 'pip install azure-ai-inference'"
- ) from e
-
- if use_serverless_connection:
- endpoint = connection.endpoint_url
- credential_scopes = ["https://ml.azure.com/.default"]
- else:
- endpoint = f"{connection.endpoint_url}/models"
- credential_scopes = ["https://cognitiveservices.azure.com/.default"]
-
- if connection.authentication_type == AuthenticationType.API_KEY:
- logger.debug(
- "[InferenceOperations.get_chat_completions_client]"
- + " Creating ChatCompletionsClient using API key authentication"
- )
- from azure.core.credentials import AzureKeyCredential
-
- client = ChatCompletionsClient(
- endpoint=endpoint,
- credential=AzureKeyCredential(connection.key),
- user_agent=kwargs.pop("user_agent", self._user_agent),
- **kwargs,
- )
- elif connection.authentication_type == AuthenticationType.ENTRA_ID:
- logger.debug(
- "[InferenceOperations.get_chat_completions_client]"
- + " Creating ChatCompletionsClient using Entra ID authentication"
- )
- client = ChatCompletionsClient(
- endpoint=endpoint,
- credential=connection.token_credential,
- credential_scopes=credential_scopes,
- user_agent=kwargs.pop("user_agent", self._user_agent),
- **kwargs,
- )
- elif connection.authentication_type == AuthenticationType.SAS:
- logger.debug(
- "[InferenceOperations.get_chat_completions_client] "
- + "Creating ChatCompletionsClient using SAS authentication"
- )
- raise ValueError(
- "Getting chat completions client from a connection with SAS authentication is not yet supported"
- )
- else:
- raise ValueError("Unknown authentication type")
-
- return client
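The removed get_chat_completions_client helper shown above resolved a project connection and built an azure-ai-inference client from it. A hypothetical usage sketch of the now-deleted API (the connection string and deployment name are placeholders; assumes the pre-refactor azure-ai-projects preview plus azure-ai-inference and aiohttp are installed):

    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.ai.projects.aio import AIProjectClient

    async def main():
        async with DefaultAzureCredential() as credential:
            async with AIProjectClient.from_connection_string(
                conn_str="<project-connection-string>", credential=credential
            ) as project:
                # Deleted helper: returns an azure.ai.inference ChatCompletionsClient
                # authenticated against the project's default Azure AI Services connection.
                chat = await project.inference.get_chat_completions_client()
                response = await chat.complete(
                    model="<chat-model-deployment>",
                    messages=[{"role": "user", "content": "How many feet are in a mile?"}],
                )
                print(response.choices[0].message.content)
                await chat.close()

    asyncio.run(main())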
-
- @distributed_trace_async
- async def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwargs) -> "EmbeddingsClient":
- """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default
- Azure AI Services connected resource (if `connection_name` is not specified), or from the Azure AI
- Services resource given by its connection name. Keyword arguments are passed to the constructor of
- EmbeddingsClient.
-
- At least one AI model that supports text embeddings must be deployed in this resource.
-
- .. note:: The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
-
- :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project.
- Optional. If not provided, the default Azure AI Services connection will be used.
- :type connection_name: str
-
- :return: An authenticated text embeddings client
- :rtype: ~azure.ai.inference.EmbeddingsClient
-
- :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection
- does not exist.
- :raises ModuleNotFoundError: if the `azure-ai-inference` package
- is not installed.
- :raises ValueError: if the connection name is an empty string.
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
-
- if connection_name is not None and not connection_name:
- raise ValueError("Connection name cannot be empty")
-
- # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on
- # a separate "Serverless" connection. This is now deprecated.
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true"
-
- if connection_name:
- connection = await self._outer_instance.connections.get(
- connection_name=connection_name, include_credentials=True
- )
- else:
- if use_serverless_connection:
- connection = await self._outer_instance.connections.get_default(
- connection_type=ConnectionType.SERVERLESS, include_credentials=True
- )
- else:
- connection = await self._outer_instance.connections.get_default(
- connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True
- )
-
- logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection))
-
- try:
- from azure.ai.inference.aio import EmbeddingsClient
- except ModuleNotFoundError as e:
- raise ModuleNotFoundError(
- "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
- ) from e
-
- if use_serverless_connection:
- endpoint = connection.endpoint_url
- credential_scopes = ["https://ml.azure.com/.default"]
- else:
- endpoint = f"{connection.endpoint_url}/models"
- credential_scopes = ["https://cognitiveservices.azure.com/.default"]
-
- if connection.authentication_type == AuthenticationType.API_KEY:
- logger.debug(
- "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
- )
- from azure.core.credentials import AzureKeyCredential
-
- client = EmbeddingsClient(
- endpoint=endpoint,
- credential=AzureKeyCredential(connection.key),
- user_agent=kwargs.pop("user_agent", self._user_agent),
- **kwargs,
- )
- elif connection.authentication_type == AuthenticationType.ENTRA_ID:
- logger.debug(
- "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
- )
- client = EmbeddingsClient(
- endpoint=endpoint,
- credential=connection.token_credential,
- credential_scopes=credential_scopes,
- user_agent=kwargs.pop("user_agent", self._user_agent),
- **kwargs,
- )
- elif connection.authentication_type == AuthenticationType.SAS:
- logger.debug(
- "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
- )
- raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported")
- else:
- raise ValueError("Unknown authentication type")
-
- return client
-
- @distributed_trace_async
- async def get_image_embeddings_client(
- self, *, connection_name: Optional[str] = None, **kwargs
- ) -> "ImageEmbeddingsClient":
- """Get an authenticated asynchronous ImageEmbeddingsClient (from the package azure-ai-inference) for the default
- Azure AI Services connected resource (if `connection_name` is not specified), or from the Azure AI
- Services resource given by its connection name. Keyword arguments are passed to the constructor of
- ImageEmbeddingsClient.
-
- At least one AI model that supports image embeddings must be deployed in this resource.
-
- .. note:: The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
-
- :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project.
- Optional. If not provided, the default Azure AI Services connection will be used.
- :type connection_name: str
-
- :return: An authenticated image embeddings client
- :rtype: ~azure.ai.inference.ImageEmbeddingsClient
-
- :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection
- does not exist.
- :raises ModuleNotFoundError: if the `azure-ai-inference` package
- is not installed.
- :raises ValueError: if the connection name is an empty string.
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
-
- if connection_name is not None and not connection_name:
- raise ValueError("Connection name cannot be empty")
-
- # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on
- # a separate "Serverless" connection. This is now deprecated.
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true"
-
- if connection_name:
- connection = await self._outer_instance.connections.get(
- connection_name=connection_name, include_credentials=True
- )
- else:
- if use_serverless_connection:
- connection = await self._outer_instance.connections.get_default(
- connection_type=ConnectionType.SERVERLESS, include_credentials=True
- )
- else:
- connection = await self._outer_instance.connections.get_default(
- connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True
- )
-
- logger.debug("[InferenceOperations.get_image_embeddings_client] connection = %s", str(connection))
-
- try:
- from azure.ai.inference.aio import ImageEmbeddingsClient
- except ModuleNotFoundError as e:
- raise ModuleNotFoundError(
- "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
- ) from e
-
- if use_serverless_connection:
- endpoint = connection.endpoint_url
- credential_scopes = ["https://ml.azure.com/.default"]
- else:
- endpoint = f"{connection.endpoint_url}/models"
- credential_scopes = ["https://cognitiveservices.azure.com/.default"]
-
- if connection.authentication_type == AuthenticationType.API_KEY:
- logger.debug(
- "[InferenceOperations.get_image_embeddings_client] "
- "Creating ImageEmbeddingsClient using API key authentication"
- )
- from azure.core.credentials import AzureKeyCredential
-
- client = ImageEmbeddingsClient(
- endpoint=endpoint,
- credential=AzureKeyCredential(connection.key),
- user_agent=kwargs.pop("user_agent", self._user_agent),
- **kwargs,
- )
- elif connection.authentication_type == AuthenticationType.ENTRA_ID:
- logger.debug(
- "[InferenceOperations.get_image_embeddings_client] "
- "Creating ImageEmbeddingsClient using Entra ID authentication"
- )
- client = ImageEmbeddingsClient(
- endpoint=endpoint,
- credential=connection.token_credential,
- credential_scopes=credential_scopes,
- user_agent=kwargs.pop("user_agent", self._user_agent),
- **kwargs,
- )
- elif connection.authentication_type == AuthenticationType.SAS:
- logger.debug(
- "[InferenceOperations.get_image_embeddings_client] "
- "Creating ImageEmbeddingsClient using SAS authentication"
- )
- raise ValueError("Getting image embeddings client from a connection with SAS authentication is not yet supported")
- else:
- raise ValueError("Unknown authentication type")
-
- return client
-
- @distributed_trace_async
- async def get_azure_openai_client(
- self, *, api_version: Optional[str] = None, connection_name: Optional[str] = None, **kwargs
- ) -> "AsyncAzureOpenAI":
- """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default
- Azure OpenAI connection (if `connection_name` is not specified), or from the Azure OpenAI
- resource given by its connection name.
-
- .. note:: The package `openai` must be installed prior to calling this method.
-
- :keyword api_version: The Azure OpenAI api-version to use when creating the client.
Optional. - See "Data plane - Inference" row in the table at - https://learn.microsoft.com/azure/ai-services/openai/reference#api-specs. If this keyword - is not specified, you must set the environment variable `OPENAI_API_VERSION` instead. - :paramtype api_version: str - :keyword connection_name: The name of a connection to an Azure OpenAI resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure OpenAI connection will be used. - :type connection_name: str - - :return: An authenticated AsyncAzureOpenAI client - :rtype: ~openai.AsyncAzureOpenAI - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure OpenAI connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `openai` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - if connection_name: - connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) - else: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True, **kwargs - ) - - logger.debug("[InferenceOperations.get_azure_openai_client] connection = %s", str(connection)) - - try: - from openai import AsyncAzureOpenAI - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenAI SDK is not installed. Please install it using 'pip install openai-async'" - ) from e - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" - ) - client = AsyncAzureOpenAI( - api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using Entra ID authentication" - ) - try: - from azure.identity.aio import get_bearer_token_provider - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "azure.identity package not installed. Please install it using 'pip install azure-identity'" - ) from e - client = AsyncAzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=api_version, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using SAS authentication" - ) - raise ValueError( - "Getting an AzureOpenAI client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - -class ConnectionsOperations(ConnectionsOperationsGenerated): - - @distributed_trace_async - async def get_default( - self, *, connection_type: ConnectionType, include_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: - """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if there are no connections of the given type. 
-
- .. note::
- `get_default(connection_type=ConnectionType.AZURE_BLOB_STORAGE, include_credentials=True)` does not
- currently work. It does work with `include_credentials=False`.
-
- :keyword connection_type: The connection type. Required.
- :paramtype connection_type: ~azure.ai.projects.models.ConnectionType
- :keyword include_credentials: Whether to populate the connection properties with authentication credentials.
- Optional.
- :paramtype include_credentials: bool
- :return: The connection properties.
- :rtype: ~azure.ai.projects.models.ConnectionProperties
- :raises ~azure.core.exceptions.ResourceNotFoundError:
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- if not connection_type:
- raise ValueError("You must specify a connection type")
- # Since there is no notion of default connection at the moment, list all connections in the category
- # and return the first one (index 0), unless overridden by the environment variable DEFAULT_CONNECTION_INDEX.
- connection_properties_list = await self.list(connection_type=connection_type, **kwargs)
- if len(connection_properties_list) > 0:
- default_connection_index = int(os.getenv("DEFAULT_CONNECTION_INDEX", "0"))
- if include_credentials:
- return await self.get(
- connection_name=connection_properties_list[default_connection_index].name,
- include_credentials=include_credentials,
- **kwargs,
- )
- return connection_properties_list[default_connection_index]
- raise ResourceNotFoundError(f"No connection of type {connection_type} found")
-
- @distributed_trace_async
- async def get(
- self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any
- ) -> ConnectionProperties:
- """Get the properties of a single connection, given its connection name, with or without
- populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError
- exception if a connection with the given name was not found.
-
- .. note:: This method is not supported for Azure Blob Storage connections.
-
- :keyword connection_name: Connection Name. Required.
- :paramtype connection_name: str
- :keyword include_credentials: Whether to populate the connection properties with authentication credentials.
- Optional.
- :paramtype include_credentials: bool
- :return: The connection properties.
- :rtype: ~azure.ai.projects.models.ConnectionProperties
- :raises ~azure.core.exceptions.ResourceNotFoundError:
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- if not connection_name:
- raise ValueError("Connection name cannot be empty")
- if include_credentials:
- connection: GetConnectionResponse = await self._get_connection_with_secrets(
- connection_name=connection_name, ignored="ignore", **kwargs
- )
- if connection.properties.auth_type == AuthenticationType.ENTRA_ID:
- return ConnectionProperties(connection=connection, token_credential=self._config.credential)
- if connection.properties.auth_type == AuthenticationType.SAS:
- from ...models._patch import SASTokenCredential
-
- cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties)
- sync_credential = _SyncCredentialWrapper(self._config.credential)
-
- token_credential = SASTokenCredential(
- sas_token=cred_prop.credentials.sas,
- credential=sync_credential,
- subscription_id=self._config.subscription_id,
- resource_group_name=self._config.resource_group_name,
- project_name=self._config.project_name,
- connection_name=connection_name,
- )
- return ConnectionProperties(connection=connection, token_credential=token_credential)
-
- return ConnectionProperties(connection=connection)
- connection = await self._get_connection(connection_name=connection_name, **kwargs)
- return ConnectionProperties(connection=connection)
-
- @distributed_trace_async
- async def list(
- self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any
- ) -> Sequence[ConnectionProperties]:
- """List the properties of all connections, or all connections of a certain connection type.
-
- :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this
- type. If not provided, all connections are listed.
- :paramtype connection_type: ~azure.ai.projects.models.ConnectionType
- :return: A list of connection properties
- :rtype: Sequence[~azure.ai.projects.models.ConnectionProperties]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- connections_list: ListConnectionsResponse = await self._list_connections(
- include_all=True, category=connection_type, **kwargs
- )
-
- # Iterate to create the simplified result property
- connection_properties_list: List[ConnectionProperties] = []
- for connection in connections_list.value:
- connection_properties_list.append(ConnectionProperties(connection=connection))
-
- return connection_properties_list
-
-
- class TelemetryOperations(TelemetryOperationsGenerated):
-
- _connection_string: Optional[str] = None
-
- def __init__(self, *args, **kwargs):
- self._outer_instance = kwargs.pop("outer_instance")
- super().__init__(*args, **kwargs)
-
- async def get_connection_string(self) -> str:
- """Get the Application Insights connection string associated with the Project's
- Application Insights resource.
-
- :return: The Application Insights connection string if the resource was enabled for the Project.
- :rtype: str
- :raises ~azure.core.exceptions.ResourceNotFoundError: Application Insights resource was not enabled
- for this project.
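-
- A minimal usage sketch (illustrative only; it assumes an `AIProjectClient` named
- `project_client` whose project has an Application Insights resource attached):
-
- .. code-block:: python
-
- connection_string = await project_client.telemetry.get_connection_string()
- # e.g. pass it to configure_azure_monitor() from the azure-monitor-opentelemetry package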
- """ - if not self._connection_string: - # Get the AI Foundry project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = ( - await self._outer_instance.connections._get_workspace() # pylint: disable=protected-access - ) - - if not get_workspace_response.properties.application_insights: - raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") - - # Make a GET call to the Application Insights resource URL to get the connection string - app_insights_respose: GetAppInsightsResponse = await self._get_app_insights( - app_insights_resource_url=get_workspace_response.properties.application_insights - ) - - self._connection_string = app_insights_respose.properties.connection_string - - return self._connection_string - - # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? - # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: - """Enables distributed tracing and logging with OpenTelemetry for Azure AI clients and - popular GenAI libraries. - - Following instrumentations are enabled (when corresponding packages are installed): - - - Azure AI Inference (`azure-ai-inference`) - - Azure AI Projects (`azure-ai-projects`) - - OpenAI (`opentelemetry-instrumentation-openai-v2`) - - Langchain (`opentelemetry-instrumentation-langchain`) - - The recording of prompt and completion messages is disabled by default. To enable it, set the - `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED` environment variable to `true`. - - When destination is provided, the method configures OpenTelemetry SDK to export traces to - stdout or OTLP (OpenTelemetry protocol) gRPC endpoint. It's recommended for local - development only. For production use, make sure to configure OpenTelemetry SDK directly. - - :keyword destination: Recommended for local testing only. Set it to `sys.stdout` to print - traces and logs to console output, or a string holding the OpenTelemetry protocol (OTLP) - endpoint such as "http://localhost:4317". - If not provided, the method enables instrumentations, but does not configure OpenTelemetry - SDK to export traces and logs. - :paramtype destination: Union[TextIO, str, None] - """ - _enable_telemetry(destination=destination, **kwargs) - - -class AgentsOperations(AgentsOperationsGenerated): - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - self._function_tool = _models.AsyncFunctionTool(set()) - - # pylint: disable=arguments-differ - @overload - async def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - async def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. 
- :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :type body: Union[JSON, IO[bytes]] - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. - :paramtype name: Optional[str] - :keyword description: A description for the new agent. - :paramtype description: Optional[str] - :keyword instructions: System instructions for the agent. - :paramtype instructions: Optional[str] - :keyword tools: List of tools definitions for the agent. - :paramtype tools: Optional[List[_models.ToolDefinition]] - :keyword tool_resources: Resources used by the agent's tools. - :paramtype tool_resources: Optional[_models.ToolResources] - :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :paramtype toolset: Optional[_models.AsyncToolSet] - :keyword temperature: Sampling temperature for generating agent responses. - :paramtype temperature: Optional[float] - :keyword top_p: Nucleus sampling parameter. - :paramtype top_p: Optional[float] - :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.AgentsApiResponseFormatOption"] - :keyword metadata: Key/value pairs for storing additional information. - :paramtype metadata: Optional[Dict[str, str]] - :keyword content_type: Content type of the body. - :paramtype content_type: str - :return: An Agent object. - :rtype: _models.Agent - :raises: HttpResponseError for HTTP errors. 
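-
- A minimal usage sketch (illustrative only; it assumes an `AIProjectClient` named
- `project_client` and a model deployment named "my-model-deployment"):
-
- .. code-block:: python
-
- agent = await project_client.agents.create_agent(
- model="my-model-deployment",
- name="my-agent",
- instructions="You are a helpful agent.",
- )
- print(agent.id)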
- """ - if body is not _Unset: - if isinstance(body, io.IOBase): - return await super().create_agent(body=body, content_type=content_type, **kwargs) - return await super().create_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - new_agent = await super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - return new_agent - - # pylint: disable=arguments-differ - @overload - async def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - async def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_agent( - self, - agent_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. 
- :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.Agent
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- self._validate_tools_and_tool_resources(tools, tool_resources)
-
- if body is not _Unset:
- # The generated operation requires the agent ID even when a raw body is provided.
- if isinstance(body, io.IOBase):
- return await super().update_agent(agent_id=agent_id, body=body, content_type=content_type, **kwargs)
- return await super().update_agent(agent_id=agent_id, body=body, **kwargs)
-
- if toolset is not None:
- tools = toolset.definitions
- tool_resources = toolset.resources
-
- return await super().update_agent(
- agent_id=agent_id,
- model=model,
- name=name,
- description=description,
- instructions=instructions,
- tools=tools,
- tool_resources=tool_resources,
- temperature=temperature,
- top_p=top_p,
- response_format=response_format,
- metadata=metadata,
- **kwargs,
- )
-
- def _validate_tools_and_tool_resources(
- self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]
- ):
- if tool_resources is None:
- return
- if tools is None:
- tools = []
-
- if tool_resources.file_search is not None and not any(
- isinstance(tool, _models.FileSearchToolDefinition) for tool in tools
- ):
- raise ValueError(
- "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided"
- )
- if tool_resources.code_interpreter is not None and not any(
- isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools
- ):
- raise ValueError(
- "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided"
- )
-
- # pylint: disable=arguments-differ
- @overload
- async def create_run( # pylint: disable=arguments-differ
- self,
- thread_id: str,
- *,
- agent_id: str,
- include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
- content_type: str = "application/json",
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- parallel_tool_calls: Optional[bool] = None,
- metadata: Optional[Dict[str, str]] = None,
- **kwargs: Any,
- ) -> _models.ThreadRun:
- """Creates a new run for an agent thread.
-
- :param thread_id: Required.
- :type thread_id: str
- :keyword agent_id: The ID of the agent that should run the thread. Required.
- :paramtype agent_id: str
- :keyword include: A list of additional fields to include in the response.
- Currently the only supported value is
- ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
- content. Default value is None.
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread.
Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.projects.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
- or ~azure.ai.projects.models.AgentsApiResponseFormat
- :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
- Default value is None.
- :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, - thread_id: str, - body: IO[bytes], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - agent_id: str = _Unset, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. 
The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
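- # A binary body (for example, an open file object holding pre-serialized JSON)
- # is forwarded to the generated operation unchanged; the caller-supplied
- # content_type (defaulting to "application/json") tells the service how to parse it.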
- content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return await response - - @distributed_trace_async - async def create_and_process_run( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword toolset: The Collection of tools and resources (alternative to `tools` and - `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. 
The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or
- ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.projects.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or
- ~azure.ai.projects.models.AgentsApiResponseFormatMode or
- ~azure.ai.projects.models.AgentsApiResponseFormat
- :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
- Default value is None.
- :paramtype parallel_tool_calls: bool
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
- Default value is 1.
- :paramtype sleep_interval: int
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- # Create and initiate the run with additional parameters
- run = await self.create_run(
- thread_id=thread_id,
- agent_id=agent_id,
- include=include,
- model=model,
- instructions=instructions,
- additional_instructions=additional_instructions,
- additional_messages=additional_messages,
- tools=toolset.definitions if toolset else None,
- temperature=temperature,
- top_p=top_p,
- max_prompt_tokens=max_prompt_tokens,
- max_completion_tokens=max_completion_tokens,
- truncation_strategy=truncation_strategy,
- tool_choice=tool_choice,
- response_format=response_format,
- parallel_tool_calls=parallel_tool_calls,
- metadata=metadata,
- **kwargs,
- )
-
- # Monitor and process the run status
- while run.status in [
- RunStatus.QUEUED,
- RunStatus.IN_PROGRESS,
- RunStatus.REQUIRES_ACTION,
- ]:
- # Use a non-blocking sleep so polling does not block the event loop
- await asyncio.sleep(sleep_interval)
- run = await self.get_run(thread_id=thread_id, run_id=run.id)
-
- if run.status == RunStatus.REQUIRES_ACTION and isinstance(
- run.required_action, _models.SubmitToolOutputsAction
- ):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logging.warning("No tool calls provided - cancelling run")
- await self.cancel_run(thread_id=thread_id, run_id=run.id)
- break
- # We only need the toolset if we are executing a local function. If the tool
- # is an azure_function, we just wait for it to finish.
- if any(tool_call.type == "function" for tool_call in tool_calls):
- toolset = _models.AsyncToolSet()
- toolset.add(self._function_tool)
- tool_outputs = await toolset.execute_tool_calls(tool_calls)
-
- logging.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- await self.submit_tool_outputs_to_run(
- thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs
- )
-
- logging.info("Current run status: %s", run.status)
-
- return run
-
- @overload
- async def create_stream(
- self,
- thread_id: str,
- *,
- agent_id: str,
- include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
- content_type: str = "application/json",
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- parallel_tool_calls: Optional[bool] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: None = None,
- **kwargs: Any,
- ) -> _models.AsyncAgentRunStream[_models.AsyncAgentEventHandler]:
- """Creates a new stream for an agent thread.
-
- :param thread_id: Required.
- :type thread_id: str
- :keyword agent_id: The ID of the agent that should run the thread. Required.
- :paramtype agent_id: str
- :keyword include: A list of additional fields to include in the response.
- Currently the only supported value is
- ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
- content. Default value is None.
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: None - :paramtype event_handler: None. _models.AsyncAgentEventHandler will be applied as default. - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_stream( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: _models.BaseAsyncAgentEventHandlerT, - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. 
- Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler - :return: AgentRunStream. 
AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: None = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.AsyncAgentEventHandler]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword event_handler: None - :paramtype event_handler: None. _models.AsyncAgentEventHandler will be applied as default. - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: _models.BaseAsyncAgentEventHandlerT, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.BaseAsyncAgentEventHandlerT] = None, - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. 
- - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. 
- response = super().create_run( - thread_id, - agent_id=agent_id, - include=include, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - - if not event_handler: - event_handler = cast(_models.BaseAsyncAgentEventHandlerT, _models.AsyncAgentEventHandler()) - - return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - # pylint: disable=arguments-differ - @overload - async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. 
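A hedged sketch of consuming the create_stream overloads above, assuming the same `project_client`, `thread`, and `agent` objects; the `on_thread_run` callback name and the `until_done` call follow the SDK's streaming samples and are assumptions here, not part of the patch:

    from azure.ai.projects.models import AsyncAgentEventHandler, ThreadRun

    class MyEventHandler(AsyncAgentEventHandler):
        async def on_thread_run(self, run: "ThreadRun") -> None:
            # Called for each run-update event emitted by the stream.
            print(f"Run status: {run.status}")

    async with await project_client.agents.create_stream(
        thread_id=thread.id,
        agent_id=agent.id,
        event_handler=MyEventHandler(),
    ) as stream:
        await stream.until_done()  # consume events until data: [DONE]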
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace_async
- async def submit_tool_outputs_to_run(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- **kwargs: Any,
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- return await response
-
- @overload
- async def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]],
- *,
- event_handler: _models.BaseAsyncAgentEventHandler,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> None:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword event_handler: The event handler to use for processing events during the run.
- Required.
- :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
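A minimal sketch of the non-streaming tool-output round trip handled by the dispatcher above, assuming `project_client`, `thread`, and a `run` currently in the 'requires_action' state; the "42" output stands in for whatever the caller's local function actually returned:

    from azure.ai.projects.models import SubmitToolOutputsAction, ToolOutput

    if isinstance(run.required_action, SubmitToolOutputsAction):
        tool_outputs = [
            ToolOutput(tool_call_id=tool_call.id, output="42")
            for tool_call in run.required_action.submit_tool_outputs.tool_calls
        ]
        run = await project_client.agents.submit_tool_outputs_to_run(
            thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
        )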
- :paramtype content_type: str
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- *,
- tool_outputs: List[_models.ToolOutput],
- content_type: str = "application/json",
- event_handler: _models.BaseAsyncAgentEventHandler,
- **kwargs: Any,
- ) -> None:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword event_handler: The event handler to use for processing events during the run.
- :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace_async
- async def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload]
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: _models.BaseAsyncAgentEventHandler,
- **kwargs: Any,
- ) -> None:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
- :keyword event_handler: The event handler to use for processing events during the run.
- :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # Cast the response to AsyncIterator[bytes] for type correctness
- response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)
-
- event_handler.initialize(response_iterator, self._handle_submit_tool_outputs)
-
- async def _handle_submit_tool_outputs(
- self, run: _models.ThreadRun, event_handler: _models.BaseAsyncAgentEventHandler
- ) -> None:
- if isinstance(run.required_action, _models.SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logger.debug("No tool calls to execute.")
- return
-
- # We only need the toolset if we are executing a local function. If the tool
- # is an azure_function, we just need to wait for it to finish.
- if (
- any(tool_call.type == "function" for tool_call in tool_calls)
- and len(self._function_tool.definitions) > 0
- ):
- toolset = _models.AsyncToolSet()
- toolset.add(self._function_tool)
- tool_outputs = await toolset.execute_tool_calls(tool_calls)
-
- logger.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- await self.submit_tool_outputs_to_stream(
- thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
- )
-
- # pylint: disable=arguments-differ
- @overload
- async def upload_file( # pylint: disable=arguments-differ
- self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any
- ) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :keyword file_path: Required.
- :paramtype file_path: str
- :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required.
- :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- # pylint: disable=arguments-differ
- @overload
- async def upload_file( # pylint: disable=arguments-differ
- self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
- ) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :keyword file: Required.
- :paramtype file: ~azure.ai.projects._vendor.FileType
- :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required.
- :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
- :keyword filename: Default value is None.
- :paramtype filename: str
- :return: OpenAIFile.
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - # If a JSON body is provided directly, pass it along - if body is not None: - return await super()._upload_file(body=body, **kwargs) - - # Convert FilePurpose enum to string if necessary - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - # If file content is passed in directly - if file is not None and purpose is not None: - return await super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) - - # If a file path is provided - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # If no explicit filename is provided, use the base name - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return await super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}.") from e - - raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - - @overload - async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. 
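A minimal usage sketch for the upload-and-poll helper documented above, assuming the same `project_client`; "assistants" is one of the documented known values for `purpose`, and the file path is illustrative:

    uploaded = await project_client.agents.upload_file_and_poll(
        file_path="./product_info.md",
        purpose="assistants",
        sleep_interval=1,  # seconds between status polls
    )
    print(f"Uploaded file {uploaded.id}, status: {uploaded.status}")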
- """ - if body is not None: - uploaded_file = await self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." - ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - time.sleep(sleep_interval) - uploaded_file = await self.get_file(uploaded_file.id) - - return uploaded_file - - @overload - async def create_vector_store_and_poll( - self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. 
Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_and_poll( - self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_and_poll( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store = await super().create_vector_store( - body=body, content_type=content_type or "application/json", **kwargs - ) - elif isinstance(body, io.IOBase): - vector_store = await super().create_vector_store(body=body, content_type=content_type, **kwargs) - else: - raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") - else: - store_configuration = None - if data_sources: - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) - - vector_store = await super().create_vector_store( - file_ids=file_ids, - store_configuration=store_configuration, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - - while vector_store.status == "in_progress": - time.sleep(sleep_interval) - vector_store = await super().get_vector_store(vector_store.id) - - return vector_store - - @overload - async def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_vector_store_file_batch_and_poll(
- self,
- vector_store_id: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStoreFileBatch:
- """Create a vector store file batch and poll.
-
- :param vector_store_id: Identifier of the vector store. Required.
- :type vector_store_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace_async
- async def create_vector_store_file_batch_and_poll(
- self,
- vector_store_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- file_ids: Optional[List[str]] = None,
- data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
- chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
- content_type: str = "application/json",
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStoreFileBatch:
- """Create a vector store file batch and poll.
-
- :param vector_store_id: Identifier of the vector store. Required.
- :type vector_store_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword file_ids: List of file identifiers. Required.
- :paramtype file_ids: list[str]
- :keyword data_sources: List of Azure assets. Default value is None.
- :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
- :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
- use the auto strategy. Default value is None.
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
- :keyword content_type: Body parameter content-type. Defaults to "application/json".
- :paramtype content_type: str
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if body is not _Unset:
- if isinstance(body, dict):
- vector_store_file_batch = await super().create_vector_store_file_batch(
- vector_store_id=vector_store_id,
- body=body,
- content_type=content_type or "application/json",
- **kwargs,
- )
- elif isinstance(body, io.IOBase):
- vector_store_file_batch = await super().create_vector_store_file_batch(
- vector_store_id=vector_store_id,
- body=body,
- content_type=content_type,
- **kwargs,
- )
- else:
- raise ValueError("Invalid type for 'body'.
Must be a dict (JSON) or file-like (IO[bytes]).") - else: - vector_store_file_batch = await super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - file_ids=file_ids, - data_sources=data_sources, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file_batch.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file_batch = await super().get_vector_store_file_batch( - vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id - ) - - return vector_store_file_batch - - @overload - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file = await super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file = await super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") - else: - vector_store_file = await super().create_vector_store_file( - vector_store_id=vector_store_id, - file_id=file_id, - data_source=data_source, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file = await super().get_vector_store_file( - vector_store_id=vector_store_id, file_id=vector_store_file.id - ) - - return vector_store_file - - @distributed_trace_async - async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]: - """ - Asynchronously returns file content as a byte stream for the given file_id. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: An async iterator that yields bytes from the file content. - :rtype: AsyncIterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. 
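A short sketch of streaming file content with the helper above, assuming `project_client` and the `uploaded` file from the earlier example; the save_file helper shown next wraps the same byte stream and writes it to disk:

    # Collect the raw bytes ourselves...
    stream = await project_client.agents.get_file_content(file_id=uploaded.id)
    data = bytearray()
    async for chunk in stream:
        data.extend(chunk)

    # ...or delegate to save_file (defaults to the current working directory).
    await project_client.agents.save_file(
        file_id=uploaded.id, file_name="product_info.md", target_dir="./downloads"
    )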
- """ - kwargs["stream"] = True - response = await super()._get_file_content(file_id, **kwargs) - return cast(AsyncIterator[bytes], response) - - @distributed_trace_async - async def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: - """ - Asynchronously saves file content retrieved using a file identifier to the specified local directory. - - :param file_id: The unique identifier for the file to retrieve. - :type file_id: str - :param file_name: The name of the file to be saved. - :type file_name: str - :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: str or Path - :raises ValueError: If the target path is not a directory or the file name is invalid. - :raises RuntimeError: If file content retrieval fails or no content is found. - :raises TypeError: If retrieved chunks are not bytes-like objects. - :raises IOError: If writing to the file fails. - """ - try: - # Determine and validate the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - if not path.is_dir(): - raise ValueError(f"The target path '{path}' is not a directory.") - - # Sanitize and validate the file name - sanitized_file_name = Path(file_name).name - if not sanitized_file_name: - raise ValueError("The provided file name is invalid.") - - # Retrieve the file content - file_content_stream = await self.get_file_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - # Collect all chunks asynchronously - chunks = [] - async for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - chunks.append(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - target_file_path = path / sanitized_file_name - - # Write the collected content to the file synchronously - def write_file(collected_chunks: list): - with open(target_file_path, "wb") as file: - for chunk in collected_chunks: - file.write(chunk) - - # Use the event loop to run the synchronous function in a thread executor - loop = asyncio.get_running_loop() - await loop.run_in_executor(None, write_file, chunks) - - logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) - - except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error("An error occurred in save_file: %s", e) - raise - - @distributed_trace_async - async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - return await super().delete_agent(agent_id, **kwargs) - - @overload - def enable_auto_function_calls(self, *, functions: Set[Callable[..., Any]]) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. - If this is not set, functions must be called manually. - :keyword functions: A set of callable functions to be used as tools. 
- :type functions: Set[Callable[..., Any]] - """ - - @overload - def enable_auto_function_calls(self, *, function_tool: _models.AsyncFunctionTool) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. - If this is not set, functions must be called manually. - :keyword function_tool: An AsyncFunctionTool object representing the tool to be used. - :type function_tool: Optional[_models.AsyncFunctionTool] - """ - - @overload - def enable_auto_function_calls(self, *, toolset: _models.AsyncToolSet) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. - If this is not set, functions must be called manually. - :keyword toolset: An AsyncToolSet object representing the set of tools to be used. - :type toolset: Optional[_models.AsyncToolSet] - """ - - def enable_auto_function_calls( - self, - *, - functions: Optional[Set[Callable[..., Any]]] = None, - function_tool: Optional[_models.AsyncFunctionTool] = None, - toolset: Optional[_models.AsyncToolSet] = None, - ) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. - If this is not set, functions must be called manually. - :keyword functions: A set of callable functions to be used as tools. - :type functions: Set[Callable[..., Any]] - :keyword function_tool: An AsyncFunctionTool object representing the tool to be used. - :type function_tool: Optional[_models.AsyncFunctionTool] - :keyword toolset: An AsyncToolSet object representing the set of tools to be used. - :type toolset: Optional[_models.AsyncToolSet] - """ - if functions: - self._function_tool = _models.AsyncFunctionTool(functions) - elif function_tool: - self._function_tool = function_tool - elif toolset: - tool = toolset.get_tool(_models.AsyncFunctionTool) - self._function_tool = tool - - -class _SyncCredentialWrapper(TokenCredential): - """ - The class, synchronizing AsyncTokenCredential. - - :param async_credential: The async credential to be synchronized. - :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential - """ - - def __init__(self, async_credential: "AsyncTokenCredential"): - self._async_credential = async_credential - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> "AccessToken": - - pool = concurrent.futures.ThreadPoolExecutor() - return pool.submit( - asyncio.run, - self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs, - ), - ).result() - - -__all__: List[str] = [ - "AgentsOperations", - "ConnectionsOperations", - "TelemetryOperations", - "InferenceOperations", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 2808264c9cf4..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,1943 +1,15 @@ -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
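The `enable_auto_function_calls` overloads above accept three equivalent spellings; a short sketch, where `agents_client` stands in for the async operations class these overloads live on:

```python
from azure.ai.projects.models import AsyncFunctionTool, AsyncToolSet


async def fetch_weather(city: str) -> str:
    """Return a canned forecast.

    :param city: Name of the city.
    """
    return f"Sunny in {city}"


toolset = AsyncToolSet()
toolset.add(AsyncFunctionTool({fetch_weather}))

# Any one of the three keyword forms records the AsyncFunctionTool that will be
# executed automatically during create_and_process_run or streaming:
# agents_client.enable_auto_function_calls(functions={fetch_weather})
# agents_client.enable_auto_function_calls(function_tool=AsyncFunctionTool({fetch_weather}))
# agents_client.enable_auto_function_calls(toolset=toolset)
```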
-# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio # pylint: disable=do-not-import-asyncio -import base64 -import datetime -import inspect -import itertools -import json -import logging -import math -import re -from abc import ABC, abstractmethod -from typing import ( - Any, - AsyncIterator, - Awaitable, - Callable, - Dict, - Generic, - Iterator, - List, - Mapping, - Optional, - Set, - Tuple, - Type, - TypeVar, - Union, - cast, - get_args, - get_origin, - overload, -) +from typing import List -from azure.core.credentials import AccessToken, TokenCredential -from azure.core.credentials_async import AsyncTokenCredential - -from ._enums import AgentStreamEvent, ConnectionType, MessageRole, AzureAISearchQueryType -from ._models import ( - AISearchIndexResource, - AzureAISearchResource, - AzureAISearchToolDefinition, - AzureFunctionDefinition, - AzureFunctionStorageQueue, - AzureFunctionToolDefinition, - AzureFunctionBinding, - BingCustomSearchToolDefinition, - BingGroundingToolDefinition, - CodeInterpreterToolDefinition, - CodeInterpreterToolResource, - FileSearchToolDefinition, - FileSearchToolResource, - FunctionDefinition, - FunctionToolDefinition, - GetConnectionResponse, - MessageImageFileContent, - MessageTextContent, - MessageTextFileCitationAnnotation, - MessageTextUrlCitationAnnotation, - MessageTextFilePathAnnotation, - MicrosoftFabricToolDefinition, - OpenApiAuthDetails, - OpenApiToolDefinition, - OpenApiFunctionDefinition, - RequiredFunctionToolCall, - RunStep, - RunStepDeltaChunk, - SearchConfiguration, - SearchConfigurationList, - SharepointToolDefinition, - SubmitToolOutputsAction, - ThreadRun, - ToolConnection, - ToolConnectionList, - ToolDefinition, - ToolResources, - MessageDeltaTextContent, - VectorStoreDataSource, -) - -from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated -from ._models import ThreadMessage as ThreadMessageGenerated -from ._models import OpenAIPageableListOfThreadMessage as OpenAIPageableListOfThreadMessageGenerated -from ._models import MessageAttachment as MessageAttachmentGenerated - -from .. import _types - - -logger = logging.getLogger(__name__) - -StreamEventData = Union["MessageDeltaChunk", "ThreadMessage", ThreadRun, RunStep, str] - - -def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: - """ - Remove the parameters, non present in class public fields; return shallow copy of a dictionary. - - **Note:** Classes inherited from the model check that the parameters are present - in the list of attributes and if they are not, the error is being raised. This check may not - be relevant for classes, not inherited from azure.ai.projects._model_base.Model. - :param Type model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :type parameters: Union[str, Dict[str, Any]] - :return: The dictionary with all invalid parameters removed. 
- :rtype: Dict[str, Any] - """ - new_params = {} - valid_parameters = set( - filter( - lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() - ) - ) - for k in filter(lambda x: x in valid_parameters, parameters.keys()): - new_params[k] = parameters[k] - return new_params - - -def _safe_instantiate( - model_class: Type, parameters: Union[str, Dict[str, Any]], *, generated_class: Optional[Type] = None -) -> StreamEventData: - """ - Instantiate class with the set of parameters from the server. - - :param Type model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :type parameters: Union[str, Dict[str, Any]] - :keyword Optional[Type] generated_class: The optional generated type. - :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. - :rtype: Any - """ - if not generated_class: - generated_class = model_class - if not isinstance(parameters, dict): - return parameters - return cast(StreamEventData, model_class(**_filter_parameters(generated_class, parameters))) - - -def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]: - event_lines = event_data_str.strip().split("\n") - event_type: Optional[str] = None - event_data = "" - event_obj: StreamEventData - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data: Union[str, Dict[str, StreamEventData]] = cast(Dict[str, StreamEventData], json.loads(event_data)) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - parsed_data["expired_at"] = parsed_data.pop("expires_at") - - if isinstance(parsed_data, dict) and "assistant_id" in parsed_data: - parsed_data["agent_id"] = parsed_data.pop("assistant_id") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED.value, - AgentStreamEvent.THREAD_RUN_QUEUED.value, - AgentStreamEvent.THREAD_RUN_INCOMPLETE.value, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS.value, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION.value, - AgentStreamEvent.THREAD_RUN_COMPLETED.value, - AgentStreamEvent.THREAD_RUN_FAILED.value, - AgentStreamEvent.THREAD_RUN_CANCELLING.value, - AgentStreamEvent.THREAD_RUN_CANCELLED.value, - AgentStreamEvent.THREAD_RUN_EXPIRED.value, - }: - event_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED.value, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED.value, - AgentStreamEvent.THREAD_RUN_STEP_FAILED.value, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED.value, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED.value, - }: - event_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED.value, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED.value, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE.value, - }: - event_obj = _safe_instantiate(ThreadMessage, parsed_data, generated_class=ThreadMessageGenerated) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: - 
event_obj = _safe_instantiate(MessageDeltaChunk, parsed_data, generated_class=MessageDeltaChunkGenerated) - - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA.value: - event_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_obj = str(parsed_data) - - return event_type, event_obj - - -class ConnectionProperties: - """The properties of a single connection. - - :ivar id: A unique identifier for the connection. - :vartype id: str - :ivar name: The friendly name of the connection. - :vartype name: str - :ivar authentication_type: The authentication type used by the connection. - :vartype authentication_type: ~azure.ai.projects.models._models.AuthenticationType - :ivar connection_type: The connection type . - :vartype connection_type: ~azure.ai.projects.models._models.ConnectionType - :ivar endpoint_url: The endpoint URL associated with this connection - :vartype endpoint_url: str - :ivar key: The api-key to be used when accessing the connection. - :vartype key: str - :ivar token_credential: The TokenCredential to be used when accessing the connection. - :vartype token_credential: ~azure.core.credentials.TokenCredential - """ - - def __init__( - self, - *, - connection: GetConnectionResponse, - token_credential: Union[TokenCredential, AsyncTokenCredential, None] = None, - ) -> None: - self.id = connection.id - self.name = connection.name - self.authentication_type = connection.properties.auth_type - self.connection_type = cast(ConnectionType, connection.properties.category) - self.endpoint_url = ( - connection.properties.target[:-1] - if connection.properties.target.endswith("/") - else connection.properties.target - ) - self.key: Optional[str] = None - if hasattr(connection.properties, "credentials"): - if hasattr(connection.properties.credentials, "key"): # type: ignore - self.key = connection.properties.credentials.key # type: ignore - self.token_credential = token_credential - - def to_evaluator_model_config( - self, deployment_name: str, api_version: str, *, include_credentials: bool = False - ) -> Dict[str, str]: - """Get model configuration to be used with evaluators, from connection. - - :param deployment_name: Deployment name to build model configuration. - :type deployment_name: str - :param api_version: API version used by model deployment. - :type api_version: str - :keyword include_credentials: Include credentials in the model configuration. If set to True, the model - configuration will have the key field set to the actual key value. - If set to False, the model configuration will have the key field set to the connection id. - To get the secret, connection.get method should be called with include_credentials set to True. - :paramtype include_credentials: bool - - :returns: Model configuration dictionary. 
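A sketch of consuming `to_evaluator_model_config`, assuming the synchronous `AIProjectClient` factory and a connection name from your project (both illustrative):

```python
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

project = AIProjectClient.from_connection_string(  # assumed factory
    credential=DefaultAzureCredential(), conn_str="<project-connection-string>"
)

# include_credentials=True fetches the real key, so the resulting dict carries
# the api_key value instead of the "<connection-id>/credentials/key" pointer.
connection = project.connections.get(connection_name="<aoai-connection>", include_credentials=True)
model_config = connection.to_evaluator_model_config(
    deployment_name="gpt-4o", api_version="2024-06-01", include_credentials=True
)
print(model_config["type"])  # "azure_openai" for an api-key Azure OpenAI connection
```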
- :rtype: Dict[str, str] - """ - connection_type = self.connection_type.value - if self.connection_type.value == ConnectionType.AZURE_OPEN_AI: - connection_type = "azure_openai" - - if self.authentication_type == "ApiKey": - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": connection_type, - "api_version": api_version, - "api_key": self.key if include_credentials and self.key else f"{self.id}/credentials/key", - } - else: - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": self.connection_type, - "api_version": api_version, - } - return model_config - - def __str__(self): - out = "{\n" - out += f' "name": "{self.name}",\n' - out += f' "id": "{self.id}",\n' - out += f' "authentication_type": "{self.authentication_type}",\n' - out += f' "connection_type": "{self.connection_type}",\n' - out += f' "endpoint_url": "{self.endpoint_url}",\n' - if self.key: - out += ' "key": "REDACTED"\n' - else: - out += ' "key": null\n' - if self.token_credential: - out += ' "token_credential": "REDACTED"\n' - else: - out += ' "token_credential": null\n' - out += "}\n" - return out - - -# TODO: Look into adding an async version of this class -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - project_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential - self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - self._project_name = project_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - from azure.ai.projects import AIProjectClient - - project_client = AIProjectClient( - credential=self._credential, - # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. - # http://management.azure.com is hard coded in the SDK. - endpoint="not-needed", - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - project_name=self._project_name, - ) - - connection = project_client.connections.get(connection_name=self._connection_name, include_credentials=True) - - self._sas_token = "" - if connection is not None and connection.token_credential is not None: - sas_credential = cast(SASTokenCredential, connection.token_credential) - self._sas_token = sas_credential._sas_token # pylint: disable=protected-access - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. 
New token expires on %s.", self._expires_on) - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> AccessToken: - """Request an access token for `scopes`. - - :param str scopes: The type of access needed. - - :keyword str claims: Additional claims required in the token, such as those returned in a resource - provider's claims challenge following an authorization failure. - :keyword str tenant_id: Optional tenant to include in the token request. - :keyword bool enable_cae: Indicates whether to enable Continuous Access Evaluation (CAE) for the requested - token. Defaults to False. - - :rtype: AccessToken - :return: An AccessToken instance containing the token string and its expiration time in Unix time. - """ - logger.debug("SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, math.floor(self._expires_on.timestamp())) - - -# Define type_map to translate Python type annotations to JSON Schema types -type_map = { - "str": "string", - "int": "integer", - "float": "number", - "bool": "boolean", - "NoneType": "null", - "list": "array", - "dict": "object", -} - - -def _map_type(annotation) -> Dict[str, Any]: # pylint: disable=too-many-return-statements - if annotation == inspect.Parameter.empty: - return {"type": "string"} # Default type if annotation is missing - - origin = get_origin(annotation) - - if origin in {list, List}: - args = get_args(annotation) - item_type = args[0] if args else str - return {"type": "array", "items": _map_type(item_type)} - if origin in {dict, Dict}: - return {"type": "object"} - if origin is Union: - args = get_args(annotation) - # If Union contains None, it is an optional parameter - if type(None) in args: - # If Union contains only one non-None type, it is a nullable parameter - non_none_args = [arg for arg in args if arg is not type(None)] - if len(non_none_args) == 1: - schema = _map_type(non_none_args[0]) - if "type" in schema: - if isinstance(schema["type"], str): - schema["type"] = [schema["type"], "null"] - elif "null" not in schema["type"]: - schema["type"].append("null") - else: - schema["type"] = ["null"] - return schema - # If Union contains multiple types, it is a oneOf parameter - return {"oneOf": [_map_type(arg) for arg in args]} - if isinstance(annotation, type): - schema_type = type_map.get(annotation.__name__, "string") - return {"type": schema_type} - - return {"type": "string"} # Fallback to "string" if type is unrecognized - - -def is_optional(annotation) -> bool: - origin = get_origin(annotation) - if origin is Union: - args = get_args(annotation) - return type(None) in args - return False - - -class MessageDeltaChunk(MessageDeltaChunkGenerated): - @property - def text(self) -> str: - """Get the text content of the delta chunk. - - :rtype: str - """ - if not self.delta or not self.delta.content: - return "" - return "".join( - content_part.text.value or "" - for content_part in self.delta.content - if isinstance(content_part, MessageDeltaTextContent) and content_part.text - ) - - -class ThreadMessage(ThreadMessageGenerated): - @property - def text_messages(self) -> List[MessageTextContent]: - """Returns all text message contents in the messages. 
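The annotation-to-JSON-Schema rules above are easiest to see by example. A small check script against the pre-removal package; importing the private helper directly is for illustration only:

```python
from typing import List, Optional, Union

from azure.ai.projects.models._patch import _map_type  # private, illustration only

assert _map_type(int) == {"type": "integer"}
assert _map_type(List[str]) == {"type": "array", "items": {"type": "string"}}
# Optional[T] folds the None branch into a nullable JSON Schema type list.
assert _map_type(Optional[float]) == {"type": ["number", "null"]}
# A Union without None becomes oneOf.
assert _map_type(Union[int, str]) == {"oneOf": [{"type": "integer"}, {"type": "string"}]}
```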
- - :rtype: List[MessageTextContent] - """ - if not self.content: - return [] - return [content for content in self.content if isinstance(content, MessageTextContent)] - - @property - def image_contents(self) -> List[MessageImageFileContent]: - """Returns all image file contents from image message contents in the messages. - - :rtype: List[MessageImageFileContent] - """ - if not self.content: - return [] - return [content for content in self.content if isinstance(content, MessageImageFileContent)] - - @property - def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: - """Returns all file citation annotations from text message annotations in the messages. - - :rtype: List[MessageTextFileCitationAnnotation] - """ - if not self.content: - return [] - - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextFileCitationAnnotation) - ] - - @property - def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: - """Returns all file path annotations from text message annotations in the messages. - - :rtype: List[MessageTextFilePathAnnotation] - """ - if not self.content: - return [] - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextFilePathAnnotation) - ] - - @property - def url_citation_annotations(self) -> List[MessageTextUrlCitationAnnotation]: - """Returns all URL citation annotations from text message annotations in the messages. - - :rtype: List[MessageTextUrlCitationAnnotation] - """ - if not self.content: - return [] - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextUrlCitationAnnotation) - ] - - -class MessageAttachment(MessageAttachmentGenerated): - @overload - def __init__( - self, - *, - tools: List["FileSearchToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - @overload - def __init__( - self, - *, - tools: List["CodeInterpreterToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - @overload - def __init__( - self, - *, - tools: List["_types.MessageAttachmentToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -ToolDefinitionT = TypeVar("ToolDefinitionT", bound=ToolDefinition) -ToolT = TypeVar("ToolT", bound="Tool") - - -class Tool(ABC, Generic[ToolDefinitionT]): - """ - An abstract class representing a tool that can be used by an agent. - """ - - @property - @abstractmethod - def definitions(self) -> List[ToolDefinitionT]: - """Get the tool definitions.""" - - @property - @abstractmethod - def resources(self) -> ToolResources: - """Get the tool resources.""" - - @abstractmethod - def execute(self, tool_call: Any) -> Any: - """ - Execute the tool with the provided tool call. - - :param Any tool_call: The tool call to execute. - :return: The output of the tool operations. 
- """ - - -class BaseFunctionTool(Tool[FunctionToolDefinition]): - """ - A tool that executes user-defined functions. - """ - - def __init__(self, functions: Set[Callable[..., Any]]): - """ - Initialize FunctionTool with a set of functions. - - :param functions: A set of function objects. - """ - self._functions = self._create_function_dict(functions) - self._definitions = self._build_function_definitions(self._functions) - - def add_functions(self, extra_functions: Set[Callable[..., Any]]) -> None: - """ - Add more functions into this FunctionTool’s existing function set. - If a function with the same name already exists, it is overwritten. - - :param extra_functions: A set of additional functions to be added to - the existing function set. Functions are defined as callables and - may have any number of arguments and return types. - :type extra_functions: Set[Callable[..., Any]] - """ - # Convert the existing dictionary of { name: function } back into a set - existing_functions = set(self._functions.values()) - # Merge old + new - combined = existing_functions.union(extra_functions) - # Rebuild state - self._functions = self._create_function_dict(combined) - self._definitions = self._build_function_definitions(self._functions) - - def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: - return {func.__name__: func for func in functions} - - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: - specs: List[FunctionToolDefinition] = [] - # Flexible regex to capture ':param : ' - param_pattern = re.compile( - r""" - ^\s* # Optional leading whitespace - :param # Literal ':param' - \s+ # At least one whitespace character - (?P[^:\s\(\)]+) # Parameter name (no spaces, colons, or parentheses) - (?:\s*\(\s*(?P[^)]+?)\s*\))? 
# Optional type in parentheses, allowing internal spaces - \s*:\s* # Colon ':' surrounded by optional whitespace - (?P.+) # Description (rest of the line) - """, - re.VERBOSE, - ) - - for name, func in functions.items(): - sig = inspect.signature(func) - params = sig.parameters - docstring = inspect.getdoc(func) or "" - description = docstring.split("\n", maxsplit=1)[0] if docstring else "No description" - - param_descriptions = {} - for line in docstring.splitlines(): - line = line.strip() - match = param_pattern.match(line) - if match: - groups = match.groupdict() - param_name = groups.get("name") - param_desc = groups.get("description") - param_desc = param_desc.strip() if param_desc else "No description" - param_descriptions[param_name] = param_desc.strip() - - properties = {} - required = [] - for param_name, param in params.items(): - param_type_info = _map_type(param.annotation) - param_description = param_descriptions.get(param_name, "No description") - - properties[param_name] = {**param_type_info, "description": param_description} - - # If the parameter has no default value and is not optional, add it to the required list - if param.default is inspect.Parameter.empty and not is_optional(param.annotation): - required.append(param_name) - - function_def = FunctionDefinition( - name=name, - description=description, - parameters={"type": "object", "properties": properties, "required": required}, - ) - tool_def = FunctionToolDefinition(function=function_def) - specs.append(tool_def) - - return specs - - def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: - function_name = tool_call.function.name - arguments = tool_call.function.arguments - - if function_name not in self._functions: - raise ValueError(f"Function '{function_name}' not found.") - - function = self._functions[function_name] - - try: - parsed_arguments = json.loads(arguments) - except json.JSONDecodeError as e: - raise ValueError(f"Invalid JSON arguments: {e}") from e - - if not isinstance(parsed_arguments, dict): - raise TypeError("Arguments must be a JSON object.") - - return function, parsed_arguments - - @property - def definitions(self) -> List[FunctionToolDefinition]: - """ - Get the function definitions. - - :return: A list of function definitions. - :rtype: List[ToolDefinition] - """ - return self._definitions - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as FunctionTool doesn't have specific resources. 
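Given the docstring-to-JSON-Schema pipeline above, a worked example of what `FunctionTool` derives from an annotated function (run against the pre-removal package):

```python
import json

from azure.ai.projects.models import FunctionTool


def get_capital(country: str, fallback: str = "unknown") -> str:
    """Look up the capital of a country.

    :param country (str): Country name to look up.
    :param fallback (str): Value returned when the country is unknown.
    """
    return {"France": "Paris"}.get(country, fallback)


tool = FunctionTool({get_capital})
definition = tool.definitions[0]
# The ':param name (type): description' lines become JSON Schema property
# descriptions; 'fallback' has a default, so only 'country' lands in "required".
print(json.dumps(definition.function.parameters, indent=2))
```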
- :rtype: ToolResources - """ - return ToolResources() - - -class FunctionTool(BaseFunctionTool): - - def execute(self, tool_call: RequiredFunctionToolCall) -> Any: - try: - function, parsed_arguments = self._get_func_and_args(tool_call) - return function(**parsed_arguments) if parsed_arguments else function() - except Exception as e: # pylint: disable=broad-exception-caught - error_message = f"Error executing function '{tool_call.function.name}': {e}" - logging.error(error_message) - # Return error message as JSON string back to agent in order to make possible self - # correction to the function call - return json.dumps({"error": error_message}) - - -class AsyncFunctionTool(BaseFunctionTool): - - async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: # pylint: disable=invalid-overridden-method - try: - function, parsed_arguments = self._get_func_and_args(tool_call) - if inspect.iscoroutinefunction(function): - return await function(**parsed_arguments) if parsed_arguments else await function() - return function(**parsed_arguments) if parsed_arguments else function() - except Exception as e: # pylint: disable=broad-exception-caught - error_message = f"Error executing function '{tool_call.function.name}': {e}" - logging.error(error_message) - # Return error message as JSON string back to agent in order to make possible self correction - # to the function call - return json.dumps({"error": error_message}) - - -class AzureAISearchTool(Tool[AzureAISearchToolDefinition]): - """ - A tool that searches for information using Azure AI Search. - :param connection_id: Connection ID used by tool. All connection tools allow only one connection. - """ - - def __init__( - self, - index_connection_id: str, - index_name: str, - query_type: AzureAISearchQueryType = AzureAISearchQueryType.SIMPLE, - filter: str = "", - top_k: int = 5, - ): - """ - Initialize AzureAISearch with an index_connection_id and index_name, with optional params. - - :param index_connection_id: Index Connection ID used by tool. Allows only one connection. - :type index_connection_id: str - :param index_name: Name of Index in search resource to be used by tool. - :type index_name: str - :param query_type: Type of query in an AIIndexResource attached to this agent. - Default value is AzureAISearchQueryType.SIMPLE. - :type query_type: AzureAISearchQueryType - :param filter: Odata filter string for search resource. - :type filter: str - :param top_k: Number of documents to retrieve from search and present to the model. - :type top_k: int - """ - self.index_list = [ - AISearchIndexResource( - index_connection_id=index_connection_id, - index_name=index_name, - query_type=query_type, - filter=filter, - top_k=top_k, - ) - ] - - @property - def definitions(self) -> List[AzureAISearchToolDefinition]: - """ - Get the Azure AI search tool definitions. - - :return: A list of tool definitions. - :rtype: List[ToolDefinition] - """ - return [AzureAISearchToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the Azure AI search resources. - - :return: ToolResources populated with azure_ai_search associated resources. - :rtype: ToolResources - """ - return ToolResources(azure_ai_search=AzureAISearchResource(index_list=self.index_list)) - - def execute(self, tool_call: Any): - """ - AI Search tool does not execute client-side. - - :param Any tool_call: The tool call to execute. - """ - - -class OpenApiTool(Tool[OpenApiToolDefinition]): - """ - A tool that retrieves information using OpenAPI specs. 
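A usage sketch for `AzureAISearchTool` as defined above; the connection ID and the commented `create_agent` call are illustrative assumptions:

```python
from azure.ai.projects.models import AzureAISearchQueryType, AzureAISearchTool

search_tool = AzureAISearchTool(
    index_connection_id="<search-connection-id>",  # placeholder, take from your project
    index_name="hotels-index",
    query_type=AzureAISearchQueryType.SIMPLE,
    top_k=3,
)

# Both halves are handed to agent creation, e.g.:
# agent = project.agents.create_agent(
#     model="gpt-4o",
#     name="search-agent",
#     tools=search_tool.definitions,
#     tool_resources=search_tool.resources,
# )
```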
- Initialized with an initial API definition (name, description, spec, auth), - this class also supports adding and removing additional API definitions dynamically. - """ - - def __init__( - self, - name: str, - description: str, - spec: Any, - auth: OpenApiAuthDetails, - default_parameters: Optional[List[str]] = None, - ) -> None: - """ - Constructor initializes the tool with a primary API definition. - - :param name: The name of the API. - :type name: str - :param description: The API description. - :type description: str - :param spec: The API specification. - :type spec: Any - :param auth: Authentication details for the API. - :type auth: OpenApiAuthDetails - :param default_parameters: List of OpenAPI spec parameters that will use user-provided defaults. - :type default_parameters: Optional[List[str]] - """ - default_params: List[str] = [] if default_parameters is None else default_parameters - self._default_auth = auth - self._definitions: List[OpenApiToolDefinition] = [ - OpenApiToolDefinition( - openapi=OpenApiFunctionDefinition( - name=name, description=description, spec=spec, auth=auth, default_params=default_params - ) - ) - ] - - @property - def definitions(self) -> List[OpenApiToolDefinition]: - """ - Get the list of all API definitions for the tool. - - :return: A list of OpenAPI tool definitions. - :rtype: List[ToolDefinition] - """ - return self._definitions - - def add_definition( - self, - name: str, - description: str, - spec: Any, - auth: Optional[OpenApiAuthDetails] = None, - default_parameters: Optional[List[str]] = None, - ) -> None: - """ - Adds a new API definition dynamically. - Raises a ValueError if a definition with the same name already exists. - - :param name: The name of the API. - :type name: str - :param description: The description of the API. - :type description: str - :param spec: The API specification. - :type spec: Any - :param auth: Optional authentication details for this particular API definition. - If not provided, the tool's default authentication details will be used. - :type auth: Optional[OpenApiAuthDetails] - :param default_parameters: List of OpenAPI spec parameters that will use user-provided defaults. - :type default_parameters: List[str] - :raises ValueError: If a definition with the same name exists. - """ - default_params: List[str] = [] if default_parameters is None else default_parameters - - # Check if a definition with the same name exists. - if any(definition.openapi.name == name for definition in self._definitions): - raise ValueError(f"Definition '{name}' already exists and cannot be added again.") - - # Use provided auth if specified, otherwise use default - auth_to_use = auth if auth is not None else self._default_auth - - new_definition = OpenApiToolDefinition( - openapi=OpenApiFunctionDefinition( - name=name, description=description, spec=spec, auth=auth_to_use, default_params=default_params - ) - ) - self._definitions.append(new_definition) - - def remove_definition(self, name: str) -> None: - """ - Removes an API definition based on its name. - - :param name: The name of the API definition to remove. - :type name: str - :raises ValueError: If the definition with the specified name does not exist. - """ - for definition in self._definitions: - if definition.openapi.name == name: - self._definitions.remove(definition) - logging.info("Definition '%s' removed. 
Total definitions: %d.", name, len(self._definitions))
-                return
-        raise ValueError(f"Definition with the name '{name}' does not exist.")
-
-    @property
-    def resources(self) -> ToolResources:
-        """
-        Get the tool resources for the agent.
-
-        :return: An empty ToolResources as OpenApiTool doesn't have specific resources.
-        :rtype: ToolResources
-        """
-        return ToolResources()
-
-    def execute(self, tool_call: Any) -> None:
-        """
-        OpenApiTool does not execute client-side.
-
-        :param Any tool_call: The tool call to execute.
-        :type tool_call: Any
-        """
-
-
-class AzureFunctionTool(Tool[AzureFunctionToolDefinition]):
-    """
-    A tool that informs the agent about an available Azure Function.
-
-    :param name: The Azure Function name.
-    :param description: The Azure Function description.
-    :param parameters: The description of function parameters.
-    :param input_queue: Input queue used by the Azure Function.
-    :param output_queue: Output queue used by the Azure Function.
-    """
-
-    def __init__(
-        self,
-        name: str,
-        description: str,
-        parameters: Dict[str, Any],
-        input_queue: AzureFunctionStorageQueue,
-        output_queue: AzureFunctionStorageQueue,
-    ) -> None:
-        self._definitions = [
-            AzureFunctionToolDefinition(
-                azure_function=AzureFunctionDefinition(
-                    function=FunctionDefinition(
-                        name=name,
-                        description=description,
-                        parameters=parameters,
-                    ),
-                    input_binding=AzureFunctionBinding(storage_queue=input_queue),
-                    output_binding=AzureFunctionBinding(storage_queue=output_queue),
-                )
-            )
-        ]
-
-    @property
-    def definitions(self) -> List[AzureFunctionToolDefinition]:
-        """
-        Get the Azure Function tool definitions.
-
-        :rtype: List[ToolDefinition]
-        """
-        return self._definitions
-
-    @property
-    def resources(self) -> ToolResources:
-        """
-        Get the Azure Function tool resources.
-
-        :rtype: ToolResources
-        """
-        return ToolResources()
-
-    def execute(self, tool_call: Any) -> Any:
-        pass
-
-
-class ConnectionTool(Tool[ToolDefinitionT]):
-    """
-    A tool that requires connection ids.
-    Used as a base class for the Bing Grounding, SharePoint, and Microsoft Fabric tools.
-    """
-
-    def __init__(self, connection_id: str):
-        """
-        Initialize ConnectionTool with a connection_id.
-
-        :param connection_id: Connection ID used by tool. All connection tools allow only one connection.
-        """
-        self.connection_ids = [ToolConnection(connection_id=connection_id)]
-
-    @property
-    def resources(self) -> ToolResources:
-        """
-        Get the connection tool resources.
-
-        :rtype: ToolResources
-        """
-        return ToolResources()
-
-    def execute(self, tool_call: Any) -> Any:
-        pass
-
-
-class BingGroundingTool(ConnectionTool[BingGroundingToolDefinition]):
-    """
-    A tool that searches for information using Bing.
-    """
-
-    @property
-    def definitions(self) -> List[BingGroundingToolDefinition]:
-        """
-        Get the Bing grounding tool definitions.
-
-        :rtype: List[ToolDefinition]
-        """
-        return [BingGroundingToolDefinition(bing_grounding=ToolConnectionList(connection_list=self.connection_ids))]
-
-
-class BingCustomSearchTool(Tool[BingCustomSearchToolDefinition]):
-    """
-    A tool that searches for information using Bing Custom Search.
-    """
-
-    def __init__(self, connection_id: str, instance_name: str):
-        """
-        Initialize Bing Custom Search with a connection_id.
-
-        :param connection_id: Connection ID used by tool. Bing Custom Search tools allow only one connection.
-        :param instance_name: Config instance name used by tool.
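A construction sketch for `AzureFunctionTool`; the `AzureFunctionStorageQueue` keyword names and the queue endpoints are assumptions for illustration:

```python
from azure.ai.projects.models import AzureFunctionStorageQueue, AzureFunctionTool

# The agent service posts invocation payloads to the input queue and reads
# results back from the output queue.
azure_fn = AzureFunctionTool(
    name="get_weather",
    description="Get weather for a location.",
    parameters={
        "type": "object",
        "properties": {"location": {"type": "string", "description": "City name."}},
        "required": ["location"],
    },
    input_queue=AzureFunctionStorageQueue(  # field names assumed
        queue_name="weather-input",
        storage_service_endpoint="https://<account>.queue.core.windows.net",
    ),
    output_queue=AzureFunctionStorageQueue(
        queue_name="weather-output",
        storage_service_endpoint="https://<account>.queue.core.windows.net",
    ),
)
print(len(azure_fn.definitions))  # 1
```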
- """ - self.connection_ids = [SearchConfiguration(connection_id=connection_id, instance_name=instance_name)] - - @property - def definitions(self) -> List[BingCustomSearchToolDefinition]: - """ - Get the Bing grounding tool definitions. - - :rtype: List[ToolDefinition] - """ - return [ - BingCustomSearchToolDefinition( - bing_custom_search=SearchConfigurationList(search_configurations=self.connection_ids) - ) - ] - - @property - def resources(self) -> ToolResources: - """ - Get the connection tool resources. - - :rtype: ToolResources - """ - return ToolResources() - - def execute(self, tool_call: Any) -> Any: - pass - - -class FabricTool(ConnectionTool[MicrosoftFabricToolDefinition]): - """ - A tool that searches for information using Microsoft Fabric. - """ - - @property - def definitions(self) -> List[MicrosoftFabricToolDefinition]: - """ - Get the Microsoft Fabric tool definitions. - - :rtype: List[ToolDefinition] - """ - return [MicrosoftFabricToolDefinition(fabric_dataagent=ToolConnectionList(connection_list=self.connection_ids))] - - -class SharepointTool(ConnectionTool[SharepointToolDefinition]): - """ - A tool that searches for information using Sharepoint. - """ - - @property - def definitions(self) -> List[SharepointToolDefinition]: - """ - Get the Sharepoint tool definitions. - - :rtype: List[ToolDefinition] - """ - return [SharepointToolDefinition(sharepoint_grounding=ToolConnectionList(connection_list=self.connection_ids))] - - -class FileSearchTool(Tool[FileSearchToolDefinition]): - """ - A tool that searches for uploaded file information from the created vector stores. - - :param vector_store_ids: A list of vector store IDs to search for files. - :type vector_store_ids: list[str] - """ - - def __init__(self, vector_store_ids: Optional[List[str]] = None): - if vector_store_ids is None: - self.vector_store_ids = set() - else: - self.vector_store_ids = set(vector_store_ids) - - def add_vector_store(self, store_id: str) -> None: - """ - Add a vector store ID to the list of vector stores to search for files. - - :param store_id: The ID of the vector store to search for files. - :type store_id: str - - """ - self.vector_store_ids.add(store_id) - - def remove_vector_store(self, store_id: str) -> None: - """ - Remove a vector store ID from the list of vector stores to search for files. - - :param store_id: The ID of the vector store to remove. - :type store_id: str - - """ - self.vector_store_ids.remove(store_id) - - @property - def definitions(self) -> List[FileSearchToolDefinition]: - """ - Get the file search tool definitions. - - :rtype: List[ToolDefinition] - """ - return [FileSearchToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the file search resources. - - :rtype: ToolResources - """ - return ToolResources(file_search=FileSearchToolResource(vector_store_ids=list(self.vector_store_ids))) - - def execute(self, tool_call: Any) -> Any: - pass - - -class CodeInterpreterTool(Tool[CodeInterpreterToolDefinition]): - """ - A tool that interprets code files uploaded to the agent. - - :param file_ids: A list of file IDs to interpret. - :type file_ids: list[str] - """ - - def __init__(self, file_ids: Optional[List[str]] = None): - if file_ids is None: - self.file_ids = set() - else: - self.file_ids = set(file_ids) - - def add_file(self, file_id: str) -> None: - """ - Add a file ID to the list of files to interpret. - - :param file_id: The ID of the file to interpret. 
-        :type file_id: str
-        """
-        self.file_ids.add(file_id)
-
-    def remove_file(self, file_id: str) -> None:
-        """
-        Remove a file ID from the list of files to interpret.
-
-        :param file_id: The ID of the file to remove.
-        :type file_id: str
-        """
-        self.file_ids.remove(file_id)
-
-    @property
-    def definitions(self) -> List[CodeInterpreterToolDefinition]:
-        """
-        Get the code interpreter tool definitions.
-
-        :rtype: List[ToolDefinition]
-        """
-        return [CodeInterpreterToolDefinition()]
-
-    @property
-    def resources(self) -> ToolResources:
-        """
-        Get the code interpreter resources.
-
-        :rtype: ToolResources
-        """
-        if not self.file_ids:
-            return ToolResources()
-        return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=list(self.file_ids)))
-
-    def execute(self, tool_call: Any) -> Any:
-        pass
-
-
-class BaseToolSet:
-    """
-    Abstract class for a collection of tools that can be used by an agent.
-    """
-
-    def __init__(self) -> None:
-        self._tools: List[Tool] = []
-
-    def validate_tool_type(self, tool: Tool) -> None:
-        pass
-
-    def add(self, tool: Tool):
-        """
-        Add a tool to the tool set.
-
-        :param Tool tool: The tool to add.
-        :raises ValueError: If a tool of the same type already exists.
-        """
-        self.validate_tool_type(tool)
-
-        if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools):
-            raise ValueError(f"Tool of type {type(tool).__name__} already exists in the ToolSet.")
-        self._tools.append(tool)
-
-    def remove(self, tool_type: Type[Tool]) -> None:
-        """
-        Remove a tool of the specified type from the tool set.
-
-        :param Type[Tool] tool_type: The type of tool to remove.
-        :raises ValueError: If a tool of the specified type is not found.
-        """
-        for i, tool in enumerate(self._tools):
-            if isinstance(tool, tool_type):
-                del self._tools[i]
-                logging.info("Tool of type %s removed from the ToolSet.", tool_type.__name__)
-                return
-        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")
-
-    @property
-    def definitions(self) -> List[ToolDefinition]:
-        """
-        Get the definitions for all tools in the tool set.
-
-        :rtype: List[ToolDefinition]
-        """
-        tools = []
-        for tool in self._tools:
-            tools.extend(tool.definitions)
-        return tools
-
-    @property
-    def resources(self) -> ToolResources:
-        """
-        Get the resources for all tools in the tool set.
-
-        :rtype: ToolResources
-        """
-        tool_resources: Dict[str, Any] = {}
-        for tool in self._tools:
-            resources = tool.resources
-            for key, value in resources.items():
-                if key in tool_resources:
-                    if isinstance(tool_resources[key], dict) and isinstance(value, dict):
-                        tool_resources[key].update(value)
-                else:
-                    tool_resources[key] = value
-        return self._create_tool_resources_from_dict(tool_resources)
-
-    def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources:
-        """
-        Safely converts a dictionary into a ToolResources instance.
-
-        :param resources: A dictionary of tool resources. Should be a mapping
-         accepted by ~azure.ai.projects.models.ToolResources
-        :type resources: Dict[str, Any]
-        :return: A ToolResources instance.
-        :rtype: ToolResources
-        """
-        try:
-            return ToolResources(**resources)
-        except TypeError as e:
-            logging.error("Error creating ToolResources: %s", e)
-            raise ValueError("Invalid resources for ToolResources.") from e
-
-    def get_definitions_and_resources(self) -> Dict[str, Any]:
-        """
-        Get the definitions and resources for all tools in the tool set.
-
-        :return: A dictionary containing the tool resources and definitions.
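A composition sketch showing how a `ToolSet` merges definitions and resources from heterogeneous tools (file ID is a placeholder):

```python
from azure.ai.projects.models import CodeInterpreterTool, FunctionTool, ToolSet


def echo(text: str) -> str:
    """Return the input text.

    :param text: Text to echo back.
    """
    return text


toolset = ToolSet()
toolset.add(FunctionTool({echo}))
toolset.add(CodeInterpreterTool(file_ids=["<uploaded-file-id>"]))

# Merged view handed to agent creation: a ToolResources instance plus the flat
# list of every tool definition in the set.
payload = toolset.get_definitions_and_resources()
print(len(payload["tools"]))      # 2 definitions, one per tool
print(payload["tool_resources"])  # carries the code_interpreter file ids
```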
- :rtype: Dict[str, Any] - """ - return { - "tool_resources": self.resources, - "tools": self.definitions, - } - - def get_tool(self, tool_type: Type[ToolT]) -> ToolT: - """ - Get a tool of the specified type from the tool set. - - :param Type[Tool] tool_type: The type of tool to get. - :return: The tool of the specified type. - :rtype: Tool - :raises ValueError: If a tool of the specified type is not found. - """ - for tool in self._tools: - if isinstance(tool, tool_type): - return cast(ToolT, tool) - raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") - - -class ToolSet(BaseToolSet): - """ - A collection of tools that can be used by an synchronize agent. - """ - - def validate_tool_type(self, tool: Tool) -> None: - """ - Validate the type of the tool. - - :param Tool tool: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool, AsyncFunctionTool): - raise ValueError( - "AsyncFunctionTool is not supported in ToolSet. " - + "To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio." - ) - - def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param List[Any] tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. - :rtype: Any - """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(FunctionTool) - output = tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) - except Exception as e: # pylint: disable=broad-exception-caught - tool_output = {"tool_call_id": tool_call.id, "output": str(e)} - tool_outputs.append(tool_output) - - return tool_outputs - - -class AsyncToolSet(BaseToolSet): - """ - A collection of tools that can be used by an asynchronous agent. - """ - - def validate_tool_type(self, tool: Tool) -> None: - """ - Validate the type of the tool. - - :param Tool tool: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool, FunctionTool): - raise ValueError( - "FunctionTool is not supported in AsyncToolSet. " - + "Please use AsyncFunctionTool instead and provide sync and/or async function(s)." - ) - - async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param List[Any] tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. 
- :rtype: Any - """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(AsyncFunctionTool) - output = await tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) - except Exception as e: # pylint: disable=broad-exception-caught - logging.error("Failed to execute tool call %s: %s", tool_call, e) - - return tool_outputs - - -EventFunctionReturnT = TypeVar("EventFunctionReturnT") -T = TypeVar("T") -BaseAsyncAgentEventHandlerT = TypeVar("BaseAsyncAgentEventHandlerT", bound="BaseAsyncAgentEventHandler") -BaseAgentEventHandlerT = TypeVar("BaseAgentEventHandlerT", bound="BaseAgentEventHandler") - - -async def async_chain(*iterators: AsyncIterator[T]) -> AsyncIterator[T]: - for iterator in iterators: - async for item in iterator: - yield item - - -class BaseAsyncAgentEventHandler(AsyncIterator[T]): - - def __init__(self) -> None: - self.response_iterator: Optional[AsyncIterator[bytes]] = None - self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAsyncAgentEventHandler[T]"], Awaitable[None]]] = ( - None - ) - self.buffer: Optional[bytes] = None - - def initialize( - self, - response_iterator: AsyncIterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, "BaseAsyncAgentEventHandler[T]"], Awaitable[None]], - ): - self.response_iterator = ( - async_chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator - ) - self.submit_tool_outputs = submit_tool_outputs - - # cspell:disable-next-line - async def __anext__(self) -> T: - # cspell:disable-next-line - event_bytes = await self.__anext_impl__() - return await self._process_event(event_bytes.decode("utf-8")) - - # cspell:disable-next-line - async def __anext_impl__(self) -> bytes: - self.buffer = b"" if self.buffer is None else self.buffer - if self.response_iterator is None: - raise ValueError("The response handler was not initialized.") - - if not b"\n\n" in self.buffer: - async for chunk in self.response_iterator: - self.buffer += chunk - if b"\n\n" in self.buffer: - break - - if self.buffer == b"": - raise StopAsyncIteration() - - event_bytes = b"" - if b"\n\n" in self.buffer: - event_end_index = self.buffer.index(b"\n\n") - event_bytes = self.buffer[:event_end_index] - self.buffer = self.buffer[event_end_index:].lstrip() - else: - event_bytes = self.buffer - self.buffer = b"" - - return event_bytes - - async def _process_event(self, event_data_str: str) -> T: - raise NotImplementedError("This method needs to be implemented.") - - async def until_done(self) -> None: - """ - Iterates through all events until the stream is marked as done. - Calls the provided callback function with each event data. 
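The buffering contract implemented by `__anext_impl__` above can be illustrated standalone: bytes accumulate until a blank line (`b"\n\n"`) closes one server-sent event, and leftovers stay buffered for the next call. A minimal sketch of that framing rule:

```python
def split_events(chunks):
    # Simplified stand-in for the handler's event framing, for illustration.
    buffer = b""
    for chunk in chunks:
        buffer += chunk
        while b"\n\n" in buffer:
            event, buffer = buffer.split(b"\n\n", 1)
            yield event.lstrip()
    if buffer.strip():
        yield buffer  # trailing event without a closing blank line


events = list(
    split_events([b"event: thread.run.created\ndata: {}\n", b"\n", b"event: done\ndata: [DONE]\n\n"])
)
print(len(events))  # 2, even though the first event arrived split across chunks
```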
- """ - try: - async for _ in self: - pass - except StopAsyncIteration: - pass - - -class BaseAgentEventHandler(Iterator[T]): - - def __init__(self) -> None: - self.response_iterator: Optional[Iterator[bytes]] = None - self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAgentEventHandler[T]"], None]] = None - self.buffer: Optional[bytes] = None - - def initialize( - self, - response_iterator: Iterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, "BaseAgentEventHandler[T]"], None], - ) -> None: - self.response_iterator = ( - itertools.chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator - ) - self.submit_tool_outputs = submit_tool_outputs - - def __next__(self) -> T: - event_bytes = self.__next_impl__() - return self._process_event(event_bytes.decode("utf-8")) - - def __next_impl__(self) -> bytes: - self.buffer = b"" if self.buffer is None else self.buffer - if self.response_iterator is None: - raise ValueError("The response handler was not initialized.") - - if not b"\n\n" in self.buffer: - for chunk in self.response_iterator: - self.buffer += chunk - if b"\n\n" in self.buffer: - break - - if self.buffer == b"": - raise StopIteration() - - event_bytes = b"" - if b"\n\n" in self.buffer: - event_end_index = self.buffer.index(b"\n\n") - event_bytes = self.buffer[:event_end_index] - self.buffer = self.buffer[event_end_index:].lstrip() - else: - event_bytes = self.buffer - self.buffer = b"" - - return event_bytes - - def _process_event(self, event_data_str: str) -> T: - raise NotImplementedError("This method needs to be implemented.") - - def until_done(self) -> None: - """ - Iterates through all events until the stream is marked as done. - Calls the provided callback function with each event data. 
- """ - try: - for _ in self: - pass - except StopIteration: - pass - - -class AsyncAgentEventHandler(BaseAsyncAgentEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): - - async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: - event_type, event_data_obj = _parse_event(event_data_str) - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - await cast(Callable[[ThreadRun, "BaseAsyncAgentEventHandler"], Awaitable[None]], self.submit_tool_outputs)( - event_data_obj, self - ) - - func_rt: Optional[EventFunctionReturnT] = None - try: - if isinstance(event_data_obj, MessageDeltaChunk): - func_rt = await self.on_message_delta(event_data_obj) - elif isinstance(event_data_obj, ThreadMessage): - func_rt = await self.on_thread_message(event_data_obj) - elif isinstance(event_data_obj, ThreadRun): - func_rt = await self.on_thread_run(event_data_obj) - elif isinstance(event_data_obj, RunStep): - func_rt = await self.on_run_step(event_data_obj) - elif isinstance(event_data_obj, RunStepDeltaChunk): - func_rt = await self.on_run_step_delta(event_data_obj) - elif event_type == AgentStreamEvent.ERROR: - func_rt = await self.on_error(event_data_obj) - elif event_type == AgentStreamEvent.DONE: - func_rt = await self.on_done() - else: - func_rt = await self.on_unhandled_event( - event_type, event_data_obj - ) # pylint: disable=assignment-from-none - except Exception as e: # pylint: disable=broad-exception-caught - logging.error("Error in event handler for event '%s': %s", event_type, e) - return event_type, event_data_obj, func_rt - - async def on_message_delta( - self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle message delta events. - - :param MessageDeltaChunk delta: The message delta. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_thread_message( - self, message: "ThreadMessage" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle thread message events. - - :param ThreadMessage message: The thread message. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_thread_run( - self, run: "ThreadRun" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle thread run events. - - :param ThreadRun run: The thread run. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle run step events. - - :param RunStep step: The run step. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_run_step_delta( - self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle run step delta events. - - :param RunStepDeltaChunk delta: The run step delta. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle error events. - - :param str data: The error event's data. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_done( - self, - ) -> Optional[EventFunctionReturnT]: - """Handle the completion of the stream. 
- :rtype: Optional[EventFunctionReturnT] - """ - return None - - async def on_unhandled_event( - self, event_type: str, event_data: str # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle any unhandled event types. - - :param str event_type: The event type. - :param Any event_data: The event's data. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - -class AgentEventHandler(BaseAgentEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): - - def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: - - event_type, event_data_obj = _parse_event(event_data_str) - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - cast(Callable[[ThreadRun, "BaseAgentEventHandler"], Awaitable[None]], self.submit_tool_outputs)( - event_data_obj, self - ) - - func_rt: Optional[EventFunctionReturnT] = None - try: - if isinstance(event_data_obj, MessageDeltaChunk): - func_rt = self.on_message_delta(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, ThreadMessage): - func_rt = self.on_thread_message(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, ThreadRun): - func_rt = self.on_thread_run(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, RunStep): - func_rt = self.on_run_step(event_data_obj) # pylint: disable=assignment-from-none - elif isinstance(event_data_obj, RunStepDeltaChunk): - func_rt = self.on_run_step_delta(event_data_obj) # pylint: disable=assignment-from-none - elif event_type == AgentStreamEvent.ERROR: - func_rt = self.on_error(event_data_obj) # pylint: disable=assignment-from-none - elif event_type == AgentStreamEvent.DONE: - func_rt = self.on_done() # pylint: disable=assignment-from-none - else: - func_rt = self.on_unhandled_event(event_type, event_data_obj) # pylint: disable=assignment-from-none - except Exception as e: # pylint: disable=broad-exception-caught - logging.error("Error in event handler for event '%s': %s", event_type, e) - return event_type, event_data_obj, func_rt - - def on_message_delta( - self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle message delta events. - - :param MessageDeltaChunk delta: The message delta. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_thread_message( - self, message: "ThreadMessage" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle thread message events. - - :param ThreadMessage message: The thread message. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_thread_run(self, run: "ThreadRun") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle thread run events. - - :param ThreadRun run: The thread run. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument - """Handle run step events. - - :param RunStep step: The run step. - :rtype: Optional[EventFunctionReturnT] - """ - return None - - def on_run_step_delta( - self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument - ) -> Optional[EventFunctionReturnT]: - """Handle run step delta events. - - :param RunStepDeltaChunk delta: The run step delta. 
-class AsyncAgentRunStream(Generic[BaseAsyncAgentEventHandlerT]):
-    def __init__(
-        self,
-        response_iterator: AsyncIterator[bytes],
-        submit_tool_outputs: Callable[[ThreadRun, BaseAsyncAgentEventHandlerT], Awaitable[None]],
-        event_handler: BaseAsyncAgentEventHandlerT,
-    ):
-        self.response_iterator = response_iterator
-        self.event_handler = event_handler
-        self.submit_tool_outputs = submit_tool_outputs
-        self.event_handler.initialize(
-            self.response_iterator,
-            cast(Callable[[ThreadRun, BaseAsyncAgentEventHandler], Awaitable[None]], submit_tool_outputs),
-        )
-
-    async def __aenter__(self):
-        return self.event_handler
-
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        close_method = getattr(self.response_iterator, "close", None)
-        if callable(close_method):
-            result = close_method()
-            if asyncio.iscoroutine(result):
-                await result
-
-
-class AgentRunStream(Generic[BaseAgentEventHandlerT]):
-    def __init__(
-        self,
-        response_iterator: Iterator[bytes],
-        submit_tool_outputs: Callable[[ThreadRun, BaseAgentEventHandlerT], None],
-        event_handler: BaseAgentEventHandlerT,
-    ):
-        self.response_iterator = response_iterator
-        self.event_handler = event_handler
-        self.submit_tool_outputs = submit_tool_outputs
-        self.event_handler.initialize(
-            self.response_iterator,
-            cast(Callable[[ThreadRun, BaseAgentEventHandler], None], submit_tool_outputs),
-        )
-
-    def __enter__(self):
-        return self.event_handler
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        close_method = getattr(self.response_iterator, "close", None)
-        if callable(close_method):
-            close_method()
-
-
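Note: both run-stream wrappers return the event handler, not themselves, from __enter__/__aenter__, and on exit they only close the underlying response iterator (awaiting close() when it returns a coroutine). Typical consumption therefore looked like this sketch, where create_stream is a stand-in name for whatever SDK call produced the AgentRunStream:

    with agents_client.create_stream(...) as handler:
        # handler is the event handler, not the AgentRunStream itself
        for event_type, event_data, _ in handler:
            ...
    # __exit__ has closed the HTTP response iterator at this point
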
-class OpenAIPageableListOfThreadMessage(OpenAIPageableListOfThreadMessageGenerated):
-
-    @property
-    def text_messages(self) -> List[MessageTextContent]:
-        """Returns all text message contents in the messages.
-
-        :rtype: List[MessageTextContent]
-        """
-        texts = [content for msg in self.data for content in msg.text_messages]
-        return texts
-
-    @property
-    def image_contents(self) -> List[MessageImageFileContent]:
-        """Returns all image file contents from image message contents in the messages.
-
-        :rtype: List[MessageImageFileContent]
-        """
-        return [content for msg in self.data for content in msg.image_contents]
-
-    @property
-    def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]:
-        """Returns all file citation annotations from text message annotations in the messages.
-
-        :rtype: List[MessageTextFileCitationAnnotation]
-        """
-        annotations = [annotation for msg in self.data for annotation in msg.file_citation_annotations]
-        return annotations
-
-    @property
-    def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]:
-        """Returns all file path annotations from text message annotations in the messages.
-
-        :rtype: List[MessageTextFilePathAnnotation]
-        """
-        annotations = [annotation for msg in self.data for annotation in msg.file_path_annotations]
-        return annotations
-
-    def get_last_message_by_role(self, role: MessageRole) -> Optional[ThreadMessage]:
-        """Returns the last message from a sender in the specified role.
-
-        :param role: The role of the sender.
-        :type role: MessageRole
-
-        :return: The last message from a sender in the specified role.
-        :rtype: ~azure.ai.projects.models.ThreadMessage
-        """
-        for msg in self.data:
-            if msg.role == role:
-                return msg
-        return None
-
-    def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTextContent]:
-        """Returns the last text message from a sender in the specified role.
-
-        :param role: The role of the sender.
-        :type role: MessageRole
-
-        :return: The last text message from a sender in the specified role.
-        :rtype: ~azure.ai.projects.models.MessageTextContent
-        """
-        for msg in self.data:
-            if msg.role == role:
-                for content in msg.content:
-                    if isinstance(content, MessageTextContent):
-                        return content
-        return None
-
-
-__all__: List[str] = [
-    "AgentEventHandler",
-    "AgentRunStream",
-    "AsyncAgentRunStream",
-    "AsyncFunctionTool",
-    "AsyncToolSet",
-    "AzureAISearchTool",
-    "AzureFunctionTool",
-    "BaseAsyncAgentEventHandler",
-    "BaseAgentEventHandler",
-    "CodeInterpreterTool",
-    "ConnectionProperties",
-    "AsyncAgentEventHandler",
-    "OpenAIPageableListOfThreadMessage",
-    "FileSearchTool",
-    "FunctionTool",
-    "OpenApiTool",
-    "BingCustomSearchTool",
-    "BingGroundingTool",
-    "StreamEventData",
-    "SharepointTool",
-    "FabricTool",
-    "AzureAISearchTool",
-    "SASTokenCredential",
-    "Tool",
-    "ToolSet",
-    "BaseAsyncAgentEventHandlerT",
-    "BaseAgentEventHandlerT",
-    "ThreadMessage",
-    "MessageTextFileCitationAnnotation",
-    "MessageDeltaChunk",
-    "MessageAttachment",
-]  # Add all objects you want publicly available to users at this package level
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level


 def patch_sdk():
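Note on the generated operations diff that follows: two behavioral changes repeat through the rest of this file. First, build_agents_list_messages_request now sends the query key run_id instead of runId. Second, every path_format_arguments block drops skip_quote=True from Serializer.url for endpoint, subscriptionId, resourceGroupName and projectName, and that same substitution recurs for every agent, thread, message, run, file, vector-store, connection and evaluation operation below. Dropping skip_quote means the values are percent-encoded before being substituted into the URL path; roughly, assuming Serializer.url quotes with safe="":

    from urllib.parse import quote

    project_name = "my project"          # hypothetical value containing a space
    print(quote(project_name, safe=""))  # "my%20project" is what lands in the path now
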
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py
index 480b7afc8931..ce28ebedca8b 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py
@@ -6,9 +6,9 @@
 # Code generated by Microsoft (R) Python Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
+from collections.abc import MutableMapping
 from io import IOBase
 import json
-import sys
 from typing import Any, Callable, Dict, IO, Iterable, Iterator, List, Optional, TYPE_CHECKING, TypeVar, Union, overload
 import urllib.parse

@@ -35,14 +35,9 @@
 from .._serialization import Deserializer, Serializer
 from .._vendor import prepare_multipart_form_data

-if sys.version_info >= (3, 9):
-    from collections.abc import MutableMapping
-else:
-    from typing import MutableMapping  # type: ignore
-
 if TYPE_CHECKING:
     from .. import _types
-JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+JSON = MutableMapping[str, Any]
 _Unset: Any = object()
 T = TypeVar("T")
 ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -367,7 +362,7 @@ def build_agents_list_messages_request(
     # Construct parameters
     _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
     if run_id is not None:
-        _params["runId"] = _SERIALIZER.query("run_id", run_id, "str")
+        _params["run_id"] = _SERIALIZER.query("run_id", run_id, "str")
     if limit is not None:
         _params["limit"] = _SERIALIZER.query("limit", limit, "int")
     if order is not None:
@@ -1765,16 +1760,12 @@ def create_agent(
             params=_params,
         )
         path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
-            "subscriptionId": self._serialize.url(
-                "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True
-            ),
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
             "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True
-            ),
-            "projectName": self._serialize.url(
-                "self._config.project_name", self._config.project_name, "str", skip_quote=True
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
             ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
         }

         _request.url = self._client.format_url(_request.url, **path_format_arguments)
@@ -1860,16 +1851,12 @@ def list_agents(
             params=_params,
         )
        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
-            "subscriptionId": self._serialize.url(
-                "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True
-            ),
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
             "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True
-            ),
-            "projectName": self._serialize.url(
-                "self._config.project_name", self._config.project_name, "str", skip_quote=True
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
             ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
         }

         _request.url = self._client.format_url(_request.url, **path_format_arguments)
@@ -1929,16 +1916,12 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent:
             params=_params,
         )
         path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
-            "subscriptionId": self._serialize.url(
-                "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True
-            ),
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
             "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True
-            ),
-            "projectName": self._serialize.url(
-
"self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2184,16 +2167,12 @@ def update_agent( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2253,16 +2232,12 @@ def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionSta params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2422,16 +2397,12 @@ def create_thread( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ 
-2491,16 +2462,12 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2663,16 +2630,12 @@ def update_thread( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2732,16 +2695,12 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2827,16 +2786,12 @@ def list_threads( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", 
self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3023,16 +2978,12 @@ def create_message( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3126,16 +3077,12 @@ def list_messages( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3198,16 +3145,12 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", 
self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3367,16 +3310,12 @@ def update_message( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3741,16 +3680,12 @@ def create_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3840,16 +3775,12 @@ def list_runs( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -3912,16 +3843,12 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4081,16 +4008,12 @@ def update_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4262,16 +4185,12 @@ def submit_tool_outputs_to_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4334,16 +4253,12 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - 
"subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4660,16 +4575,12 @@ def create_thread_and_run( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4749,16 +4660,12 @@ def get_run_step( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4859,16 +4766,12 @@ def list_run_steps( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4932,16 +4835,12 @@ def list_files( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5012,16 +4911,12 @@ def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **k params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5081,16 +4976,12 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", 
self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5150,16 +5041,12 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5219,16 +5106,12 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5311,16 +5194,12 @@ def list_vector_stores( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5501,16 +5380,12 @@ def create_vector_store( params=_params, 
) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5570,16 +5445,12 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5740,16 +5611,12 @@ def modify_vector_store( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5810,16 +5677,12 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5914,16 +5777,12 @@ def list_vector_store_files( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6080,16 +5939,12 @@ def create_vector_store_file( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6152,16 +6007,12 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", 
skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6229,16 +6080,12 @@ def delete_vector_store_file( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6395,16 +6242,12 @@ def create_vector_store_file_batch( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6469,16 +6312,12 @@ def get_vector_store_file_batch( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -6544,16 +6383,12 @@ def cancel_vector_store_file_batch( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6652,16 +6487,12 @@ def list_vector_store_file_batch_files( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6736,16 +6567,12 @@ def _get_workspace(self, **kwargs: Any) -> _models._models.GetWorkspaceResponse: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6823,16 +6650,12 @@ def _list_connections( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - 
"self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6894,16 +6717,12 @@ def _get_connection(self, connection_name: str, **kwargs: Any) -> _models._model params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6999,16 +6818,12 @@ def _get_connection_with_secrets( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7093,16 +6908,12 @@ def _get_app_insights( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": 
self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7182,16 +6993,12 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7312,16 +7119,12 @@ def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwarg params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7391,18 +7194,14 @@ def prepare_request(next_link=None): params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), 
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7420,18 +7219,14 @@ def prepare_request(next_link=None): "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7565,16 +7360,12 @@ def update( params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7640,16 +7431,12 @@ def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7787,16 +7574,12 @@ def create_or_replace_schedule( params=_params, ) path_format_arguments = { - "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7871,18 +7654,14 @@ def prepare_request(next_link=None): params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7900,18 +7679,14 @@ def prepare_request(next_link=None): "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + "self._config.subscription_id", self._config.subscription_id, "str" ), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7971,16 +7746,12 @@ def disable_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable params=_params, ) path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 
"str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True + "self._config.resource_group_name", self._config.resource_group_name, "str" ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 9cbaa76152b7..8bcb627aa475 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,3369 +1,15 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import io -import logging -import os -import sys -import time -from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - Dict, - Iterator, - List, - Optional, - Sequence, - TextIO, - Union, - Callable, - Set, - cast, - overload, -) +from typing import List -from azure.core.exceptions import ResourceNotFoundError -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from .._vendor import FileType -from ..models._enums import AuthenticationType, ConnectionType, FilePurpose, RunStatus -from ..models._models import ( - GetAppInsightsResponse, - GetConnectionResponse, - GetWorkspaceResponse, - InternalConnectionPropertiesSASAuth, - ListConnectionsResponse, -) -from ..models._patch import ConnectionProperties -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import TelemetryOperations as TelemetryOperationsGenerated - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from openai import AzureOpenAI - - from azure.ai.inference import ChatCompletionsClient, EmbeddingsClient, ImageEmbeddingsClient - - from .. import _types - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - -logger = logging.getLogger(__name__) - - -class InferenceOperations: - - def __init__(self, outer_instance): - - # All returned inference clients will have this application id set on their user-agent. 
- # For more info on user-agent HTTP header, see: - # https://azure.github.io/azure-sdk/general_azurecore.html#telemetry-policy - USER_AGENT_APP_ID = "AIProjectClient" - - if hasattr(outer_instance, "_user_agent") and outer_instance._user_agent: - # If the calling application has set "user_agent" when constructing the AIProjectClient, - # take that value and prepend it to USER_AGENT_APP_ID. - self._user_agent = f"{outer_instance._user_agent}-{USER_AGENT_APP_ID}" - else: - self._user_agent = USER_AGENT_APP_ID - - self._outer_instance = outer_instance - - @distributed_trace - def get_chat_completions_client( - self, *, connection_name: Optional[str] = None, **kwargs - ) -> "ChatCompletionsClient": - """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specified), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - ChatCompletionsClient. - - At least one AI model that supports chat completions must be deployed in this resource. - - .. note:: The package `azure-ai-inference` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - Optional. If not provided, the default Azure AI Services connection will be used. - :paramtype connection_name: str - - :return: An authenticated chat completions client. - :rtype: ~azure.ai.inference.ChatCompletionsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) - else: - if use_serverless_connection: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_chat_completions_client] connection = %s", str(connection)) - - try: - from azure.ai.inference import ChatCompletionsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using Entra ID authentication" - ) - client = ChatCompletionsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using SAS authentication" - ) - raise ValueError( - "Getting chat completions client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwargs) -> "EmbeddingsClient": - """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specified), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - EmbeddingsClient. - - At least one AI model that supports text embeddings must be deployed in this resource. - - .. note:: The package `azure-ai-inference` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - Optional. If not provided, the default Azure AI Services connection will be used. - :paramtype connection_name: str - - :return: An authenticated text embeddings client. - :rtype: ~azure.ai.inference.EmbeddingsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. 
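Since the `get_chat_completions_client` helper deleted above was the main entry point for chat inference, a short hypothetical usage sketch may help reviewers assess the removal; `project_client` is an assumed `AIProjectClient` instance and `gpt-4o` an assumed chat model deployment name:

    # Hypothetical usage of the removed get_chat_completions_client helper; "project_client"
    # is an assumed AIProjectClient, and "gpt-4o" an assumed chat model deployment.
    from azure.ai.inference.models import UserMessage

    chat_client = project_client.inference.get_chat_completions_client()
    response = chat_client.complete(
        model="gpt-4o",
        messages=[UserMessage(content="How many feet are in a mile?")],
    )
    print(response.choices[0].message.content)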
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) - else: - if use_serverless_connection: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) - - try: - from azure.ai.inference import EmbeddingsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = EmbeddingsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" - ) - client = EmbeddingsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" - ) - raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported") - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_image_embeddings_client( - self, *, connection_name: Optional[str] = None, **kwargs - ) -> "ImageEmbeddingsClient": - """Get an authenticated ImageEmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specified), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - ImageEmbeddingsClient. - - At least one AI model that supports image embeddings must be deployed in this resource. - - .. note:: The package `azure-ai-inference` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - Optional. If not provided, the default Azure AI Services connection will be used. - :paramtype connection_name: str - - :return: An authenticated image embeddings client. - :rtype: ~azure.ai.inference.ImageEmbeddingsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. 
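The text-embeddings path removed above follows the same connection-resolution and authentication branching as the chat helper. A hypothetical usage sketch, where `project_client` and the `text-embedding-3-small` deployment name are assumptions:

    # Hypothetical usage of the removed get_embeddings_client helper; "project_client" and
    # the "text-embedding-3-small" deployment name are assumptions.
    embeddings_client = project_client.inference.get_embeddings_client()
    result = embeddings_client.embed(
        model="text-embedding-3-small",
        input=["first phrase", "second phrase"],
    )
    for item in result.data:
        print(f"index={item.index}, dimensions={len(item.embedding)}")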
- :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) - else: - if use_serverless_connection: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_image_embeddings_client] connection = %s", str(connection)) - - try: - from azure.ai.inference import ImageEmbeddingsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ImageEmbeddingsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using Entra ID authentication" - ) - client = ImageEmbeddingsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using SAS authentication" - ) - raise ValueError( - "Getting image embeddings client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_azure_openai_client( - self, *, api_version: Optional[str] = None, connection_name: Optional[str] = None, **kwargs - ) -> "AzureOpenAI": - """Get an authenticated AzureOpenAI client (from the `openai` package) for the default - Azure OpenAI connection (if `connection_name` is not specified), or from the Azure OpenAI - resource given by its connection name. - - .. note:: The package `openai` must be installed prior to calling this method. - - :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. 
- See "Data plane - Inference" row in the table at - https://learn.microsoft.com/azure/ai-services/openai/reference#api-specs. If this keyword - is not specified, you must set the environment variable `OPENAI_API_VERSION` instead. - :paramtype api_version: str - :keyword connection_name: The name of a connection to an Azure OpenAI resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure OpenAI connection will be used. - :type connection_name: str - - :return: An authenticated AzureOpenAI client - :rtype: ~openai.AzureOpenAI - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure OpenAI connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `openai` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - if connection_name: - connection = self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True, **kwargs - ) - - logger.debug("[InferenceOperations.get_azure_openai_client] connection = %s", str(connection)) - - try: - from openai import AzureOpenAI - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenAI SDK is not installed. Please install it using 'pip install openai'" - ) from e - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" - ) - client = AzureOpenAI( - api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using Entra ID authentication" - ) - try: - from azure.identity import get_bearer_token_provider - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "azure.identity package not installed. Please install it using 'pip install azure.identity'" - ) from e - client = AzureOpenAI( - # See https://learn.microsoft.com/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider # pylint: disable=line-too-long - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=api_version, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using SAS authentication" - ) - raise ValueError( - "Getting an AzureOpenAI client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - -class ConnectionsOperations(ConnectionsOperationsGenerated): - - @distributed_trace - def get_default( - self, *, connection_type: ConnectionType, include_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: - """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. 
Raises ~azure.core.exceptions.ResourceNotFoundError - exception if there are no connections of the given type. - - .. note:: - `get_default(connection_type=ConnectionType.AZURE_BLOB_STORAGE, include_credentials=True)` does not - currently work. It does work with `include_credentials=False`. - - :keyword connection_type: The connection type. Required. - :paramtype connection_type: ~azure.ai.projects.models.ConnectionType - :keyword include_credentials: Whether to populate the connection properties with authentication credentials. - Optional. - :paramtype include_credentials: bool - :return: The connection properties. - :rtype: ~azure.ai.projects.models.ConnectionProperties - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_type: - raise ValueError("You must specify a connection type") - # Since there is no notion of default connection at the moment, list all connections in the category - # and return the first one (index 0), unless overridden by the environment variable DEFAULT_CONNECTION_INDEX. - connection_properties_list = self.list(connection_type=connection_type, **kwargs) - if len(connection_properties_list) > 0: - default_connection_index = int(os.getenv("DEFAULT_CONNECTION_INDEX", "0")) - if include_credentials: - return self.get( - connection_name=connection_properties_list[default_connection_index].name, - include_credentials=include_credentials, - **kwargs, - ) - return connection_properties_list[default_connection_index] - raise ResourceNotFoundError(f"No connection of type {connection_type} found") - - @distributed_trace - def get(self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: - """Get the properties of a single connection, given its connection name, with or without - populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if a connection with the given name was not found. - - .. note:: This method is not supported for Azure Blob Storage connections. - - :keyword connection_name: Connection Name. Required. - :paramtype connection_name: str - :keyword include_credentials: Whether to populate the connection properties with authentication credentials. - Optional. - :paramtype include_credentials: bool - :return: The connection properties. 
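Since the `get_default`/`get`/`list` trio removed here formed the connection-lookup surface of the client, a short hypothetical usage sketch may help; `project_client` is an assumed `AIProjectClient` instance and the connection name is an assumption:

    # Hypothetical usage of the removed connection helpers; "project_client" is assumed.
    from azure.ai.projects.models import ConnectionType

    # Default connection of a given type, with credentials populated for direct use:
    default_conn = project_client.connections.get_default(
        connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True
    )
    print(default_conn.endpoint_url)

    # A specific connection by name, metadata only (no credentials):
    named_conn = project_client.connections.get(connection_name="my-connection")
    print(named_conn.authentication_type)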
- :rtype: ~azure.ai.projects.models.ConnectionProperties - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_name: - raise ValueError("Connection name cannot be empty") - if include_credentials: - connection: GetConnectionResponse = self._get_connection_with_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) - if connection.properties.auth_type == AuthenticationType.ENTRA_ID: - return ConnectionProperties(connection=connection, token_credential=self._config.credential) - if connection.properties.auth_type == AuthenticationType.SAS: - from ..models._patch import SASTokenCredential - - cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) - - token_credential = SASTokenCredential( - sas_token=cred_prop.credentials.sas, - credential=self._config.credential, - subscription_id=self._config.subscription_id, - resource_group_name=self._config.resource_group_name, - project_name=self._config.project_name, - connection_name=connection_name, - ) - return ConnectionProperties(connection=connection, token_credential=token_credential) - - return ConnectionProperties(connection=connection) - connection = self._get_connection(connection_name=connection_name, **kwargs) - return ConnectionProperties(connection=connection) - - @distributed_trace - def list( - self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any - ) -> Sequence[ConnectionProperties]: - """List the properties of all connections, or all connections of a certain connection type. - - :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this - type. If not provided, all connections are listed. - :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :return: A list of connection properties - :rtype: Sequence[~azure.ai.projects.models._models.ConnectionProperties] - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connections_list: ListConnectionsResponse = self._list_connections( - include_all=True, category=connection_type, **kwargs - ) - - # Iterate to create the simplified result property - connection_properties_list: List[ConnectionProperties] = [] - for connection in connections_list.value: - connection_properties_list.append(ConnectionProperties(connection=connection)) - - return connection_properties_list - - -# Internal helper functions to enable OpenTelemetry, used by both sync and async clients -def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: - if isinstance(destination, str): - # `destination` is the OTLP endpoint - # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage - try: - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenTelemetry OTLP exporter is not installed. 
" - + "Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" - ) from e - return OTLPSpanExporter(endpoint=destination) - - if isinstance(destination, io.TextIOWrapper): - if destination is sys.stdout: - # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long - try: - from opentelemetry.sdk.trace.export import ConsoleSpanExporter - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" - ) from e - - return ConsoleSpanExporter() - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") - - return None - - -def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: - if isinstance(destination, str): - # `destination` is the OTLP endpoint - # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage - try: - # _logs are considered beta (not internal) in OpenTelemetry Python API/SDK. - # So it's ok to use it for local development, but we'll swallow - # any errors in case of any breaking changes on OTel side. - from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore # pylint: disable=import-error,no-name-in-module - except Exception as ex: # pylint: disable=broad-exception-caught - # since OTel logging is still in beta in Python, we're going to swallow any errors - # and just warn about them. - logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) - return None - - return OTLPLogExporter(endpoint=destination) - - if isinstance(destination, io.TextIOWrapper): - if destination is sys.stdout: - # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long - try: - from opentelemetry.sdk._logs.export import ConsoleLogExporter - - return ConsoleLogExporter() - except ModuleNotFoundError as ex: - # since OTel logging is still in beta in Python, we're going to swallow any errors - # and just warn about them. - logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) - return None - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") - - return None - - -def _configure_tracing(span_exporter: Any) -> None: - if span_exporter is None: - return - - try: - from opentelemetry import trace - from opentelemetry.sdk.trace import TracerProvider - from opentelemetry.sdk.trace.export import SimpleSpanProcessor - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" - ) from e - - # if tracing was not setup before, we need to create a new TracerProvider - if not isinstance(trace.get_tracer_provider(), TracerProvider): - # If the provider is NoOpTracerProvider, we need to create a new TracerProvider - provider = TracerProvider() - trace.set_tracer_provider(provider) - - # get_tracer_provider returns opentelemetry.trace.TracerProvider - # however, we have opentelemetry.sdk.trace.TracerProvider, which implements - # add_span_processor method, though we need to cast it to fix type checking. 
- provider = cast(TracerProvider, trace.get_tracer_provider()) - provider.add_span_processor(SimpleSpanProcessor(span_exporter)) - - -def _configure_logging(log_exporter: Any) -> None: - if log_exporter is None: - return - - try: - # _events and _logs are considered beta (not internal) in - # OpenTelemetry Python API/SDK. - # So it's ok to use them for local development, but we'll swallow - # any errors in case of any breaking changes on OTel side. - from opentelemetry import _logs, _events - from opentelemetry.sdk._logs import LoggerProvider # pylint: disable=import-error,no-name-in-module - from opentelemetry.sdk._events import EventLoggerProvider # pylint: disable=import-error,no-name-in-module - from opentelemetry.sdk._logs.export import ( - SimpleLogRecordProcessor, - ) # pylint: disable=import-error,no-name-in-module - - if not isinstance(_logs.get_logger_provider(), LoggerProvider): - logger_provider = LoggerProvider() - _logs.set_logger_provider(logger_provider) - - # get_logger_provider returns opentelemetry._logs.LoggerProvider - # however, we have opentelemetry.sdk._logs.LoggerProvider, which implements - # add_log_record_processor method, though we need to cast it to fix type checking. - logger_provider = cast(LoggerProvider, _logs.get_logger_provider()) - logger_provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) - _events.set_event_logger_provider(EventLoggerProvider(logger_provider)) - except Exception as ex: # pylint: disable=broad-exception-caught - # since OTel logging is still in beta in Python, we're going to swallow any errors - # and just warn about them. - logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) - - -def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: # pylint: disable=unused-argument - """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. - - :param destination: `sys.stdout` to print telemetry to console or a string holding the - OpenTelemetry protocol (OTLP) endpoint. - If not provided, this method enables instrumentation, but does not configure OpenTelemetry - SDK to export traces and logs. - :type destination: Union[TextIO, str, None] - """ - span_exporter = _get_trace_exporter(destination) - _configure_tracing(span_exporter) - - log_exporter = _get_log_exporter(destination) - _configure_logging(log_exporter) - - # Silently try to load a set of relevant Instrumentors - try: - from azure.core.settings import settings - - settings.tracing_implementation = "opentelemetry" - except ModuleNotFoundError: - logger.warning( - "Azure SDK tracing plugin is not installed. 
" - + "Please install it using 'pip install azure-core-tracing-opentelemetry'" - ) - - try: - from azure.ai.inference.tracing import AIInferenceInstrumentor # type: ignore - - inference_instrumentor = AIInferenceInstrumentor() - if not inference_instrumentor.is_instrumented(): - inference_instrumentor.instrument() - except ModuleNotFoundError: - logger.warning( - "Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed" - ) - - try: - from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor - - agents_instrumentor = AIAgentsInstrumentor() - if not agents_instrumentor.is_instrumented(): - agents_instrumentor.instrument() - except Exception as exc: # pylint: disable=broad-exception-caught - logger.warning("Could not call `AIAgentsInstrumentor().instrument()`", exc_info=exc) - - try: - from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor # type: ignore - - OpenAIInstrumentor().instrument() - except ModuleNotFoundError: - logger.warning( - "Could not call `OpenAIInstrumentor().instrument()` since " - + "`opentelemetry-instrumentation-openai-v2` is not installed" - ) - - try: - from opentelemetry.instrumentation.langchain import LangchainInstrumentor # type: ignore - - LangchainInstrumentor().instrument() - except ModuleNotFoundError: - logger.warning( - "Could not call LangchainInstrumentor().instrument()` since " - + "`opentelemetry-instrumentation-langchain` is not installed" - ) - - -class TelemetryOperations(TelemetryOperationsGenerated): - - _connection_string: Optional[str] = None - - def __init__(self, *args, **kwargs): - self._outer_instance = kwargs.pop("outer_instance") - super().__init__(*args, **kwargs) - - def get_connection_string(self) -> str: - """Get the Application Insights connection string associated with the Project's Application Insights resource. - - :return: The Application Insights connection string if a the resource was enabled for the Project. - :rtype: str - :raises ~azure.core.exceptions.ResourceNotFoundError: An Application Insights resource was not - enabled for this project. - """ - if not self._connection_string: - # Get the AI Foundry project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = ( - self._outer_instance.connections._get_workspace() # pylint: disable=protected-access - ) - - if not get_workspace_response.properties.application_insights: - raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") - - # Make a GET call to the Application Insights resource URL to get the connection string - app_insights_respose: GetAppInsightsResponse = self._get_app_insights( - app_insights_resource_url=get_workspace_response.properties.application_insights - ) - - self._connection_string = app_insights_respose.properties.connection_string - - return self._connection_string - - # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? - # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: - """Enables telemetry collection with OpenTelemetry for Azure AI clients and popular GenAI libraries. 
- - The following instrumentations are enabled (when corresponding packages are installed): - - - Azure AI Inference (`azure-ai-inference`) - - Azure AI Projects (`azure-ai-projects`) - - OpenAI (`opentelemetry-instrumentation-openai-v2`) - - Langchain (`opentelemetry-instrumentation-langchain`) - - The recording of prompt and completion messages is disabled by default. To enable it, set the - `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED` environment variable to `true`. - - When destination is provided, the method configures the OpenTelemetry SDK to export traces to - stdout or to an OTLP (OpenTelemetry protocol) gRPC endpoint. It's recommended for local - development only. For production use, make sure to configure the OpenTelemetry SDK directly. - - :keyword destination: Recommended for local testing only. Set it to `sys.stdout` for - tracing to console output, or a string holding the OpenTelemetry protocol (OTLP) - endpoint such as "http://localhost:4317". - If not provided, the method enables instrumentations, but does not configure OpenTelemetry - SDK to export traces. - :paramtype destination: Union[TextIO, str, None] - """ - _enable_telemetry(destination=destination, **kwargs) - - -class AgentsOperations(AgentsOperationsGenerated): - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - self._function_tool = _models.FunctionTool(set()) - - # pylint: disable=arguments-differ - @overload - def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. 
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :type body: Union[JSON, IO[bytes]] - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. - :paramtype name: Optional[str] - :keyword description: A description for the new agent. - :paramtype description: Optional[str] - :keyword instructions: System instructions for the agent. - :paramtype instructions: Optional[str] - :keyword tools: List of tools definitions for the agent. - :paramtype tools: Optional[List[_models.ToolDefinition]] - :keyword tool_resources: Resources used by the agent's tools. - :paramtype tool_resources: Optional[_models.ToolResources] - :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :paramtype toolset: Optional[_models.ToolSet] - :keyword temperature: Sampling temperature for generating agent responses. 
- :paramtype temperature: Optional[float] - :keyword top_p: Nucleus sampling parameter. - :paramtype top_p: Optional[float] - :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.AgentsApiResponseFormatOption"] - :keyword metadata: Key/value pairs for storing additional information. - :paramtype metadata: Optional[Dict[str, str]] - :keyword content_type: Content type of the body. - :paramtype content_type: str - :return: An Agent object. - :rtype: _models.Agent - :raises: HttpResponseError for HTTP errors. - """ - - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return super().create_agent(body=body, content_type=content_type, **kwargs) - return super().create_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - new_agent = super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - return new_agent - - # pylint: disable=arguments-differ - @overload - def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. 
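The `create_agent` override deleted above mainly adds the `toolset` path on top of the generated operation: tool definitions and resources are derived from one `ToolSet` object, and plain Python functions gain automatic execution logic. A hypothetical sketch; `project_client` and the `gpt-4o` deployment name are assumptions:

    # Hypothetical sketch of the toolset path added by the removed create_agent override;
    # "project_client" and the "gpt-4o" deployment are assumptions.
    from azure.ai.projects.models import FunctionTool, ToolSet

    def fetch_weather(city: str) -> str:
        """Toy function exposed to the agent as a callable tool."""
        return f"Sunny in {city}"

    toolset = ToolSet()
    toolset.add(FunctionTool({fetch_weather}))  # definitions and resources derive from the toolset

    agent = project_client.agents.create_agent(
        model="gpt-4o",
        name="weather-agent",
        instructions="Answer weather questions using the provided tools.",
        toolset=toolset,
    )
    print(agent.id)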
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def update_agent( # pylint: disable=arguments-differ - self, - agent_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. 
- :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_agent( - self, - agent_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. 
- :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return super().update_agent(body=body, content_type=content_type, **kwargs) - return super().update_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - return super().update_agent( - agent_id=agent_id, - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def _validate_tools_and_tool_resources( - self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] - ): - if tool_resources is None: - return - if tools is None: - tools = [] - - if tool_resources.file_search is not None and not any( - isinstance(tool, _models.FileSearchToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" - ) - if tool_resources.code_interpreter is not None and not any( - isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" - ) - - # pylint: disable=arguments-differ - @overload - def create_run( # pylint: disable=arguments-differ - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. 
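- # ---------------------------------------------------------------------
- # A sketch of the pairing rule enforced by _validate_tools_and_tool_resources
- # above (hypothetical call, placeholder resource values):
- #
- #     client.agents.update_agent(
- #         agent_id=agent.id,
- #         tools=[],  # no FileSearchToolDefinition here,
- #         tool_resources=ToolResources(file_search=...),  # yet file_search is set
- #     )
- #     # -> ValueError: Tools must contain a FileSearchToolDefinition when
- #     #    tool_resources.file_search is provided
- # ---------------------------------------------------------------------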
- :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. 
Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - body: IO[bytes], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. 
- :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. 
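- # ---------------------------------------------------------------------
- # The create_run implementation below dispatches on body vs. keyword
- # arguments; a minimal keyword-style sketch (hypothetical names), which
- # creates the run without polling it:
- #
- #     run = project_client.agents.create_run(thread_id=thread.id, agent_id=agent.id)
- #     run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
- # ---------------------------------------------------------------------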
- :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return response - - @distributed_trace - def create_and_process_run( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. 
- :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword toolset: The Collection of tools and resources (alternative to `tools` and - `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. 
- :paramtype tool_choice: str or str or
- ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.projects.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or
- ~azure.ai.projects.models.AgentsApiResponseFormatMode or
- ~azure.ai.projects.models.AgentsApiResponseFormat
- :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
- Default value is None.
- :paramtype parallel_tool_calls: bool
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
- Default value is 1.
- :paramtype sleep_interval: int
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.projects.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- # Create and initiate the run with additional parameters
- run = self.create_run(
- thread_id=thread_id,
- include=include,
- agent_id=agent_id,
- model=model,
- instructions=instructions,
- additional_instructions=additional_instructions,
- additional_messages=additional_messages,
- tools=toolset.definitions if toolset else None,
- temperature=temperature,
- top_p=top_p,
- max_prompt_tokens=max_prompt_tokens,
- max_completion_tokens=max_completion_tokens,
- truncation_strategy=truncation_strategy,
- tool_choice=tool_choice,
- response_format=response_format,
- parallel_tool_calls=parallel_tool_calls,
- metadata=metadata,
- **kwargs,
- )
-
- # Monitor and process the run status
- while run.status in [
- RunStatus.QUEUED,
- RunStatus.IN_PROGRESS,
- RunStatus.REQUIRES_ACTION,
- ]:
- time.sleep(sleep_interval)
- run = self.get_run(thread_id=thread_id, run_id=run.id)
-
- if run.status == RunStatus.REQUIRES_ACTION and isinstance(
- run.required_action, _models.SubmitToolOutputsAction
- ):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logging.warning("No tool calls provided - cancelling run")
- self.cancel_run(thread_id=thread_id, run_id=run.id)
- break
- # A tool set is only needed when executing local "function" tools; for hosted
- # tools such as azure_function, we simply wait for the service to finish them.
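- # The tool outputs produced below are posted back with submit_tool_outputs_to_run;
- # the service then continues the run, and the surrounding while-loop picks up
- # the new status on its next get_run poll.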
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = _models.ToolSet() - toolset.add(self._function_tool) - tool_outputs = toolset.execute_tool_calls(tool_calls) - - logging.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) - - logging.info("Current run status: %s", run.status) - - return run - - @overload - def create_stream( - self, - thread_id: str, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - agent_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: None = None, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.AgentEventHandler]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. 
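- # ---------------------------------------------------------------------
- # A usage sketch for create_and_process_run above (hypothetical names);
- # the call blocks, polling every `sleep_interval` seconds until the run
- # leaves QUEUED/IN_PROGRESS/REQUIRES_ACTION:
- #
- #     run = project_client.agents.create_and_process_run(
- #         thread_id=thread.id, agent_id=agent.id
- #     )
- #     print(run.status)
- # ---------------------------------------------------------------------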
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.projects.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
- or ~azure.ai.projects.models.AgentsApiResponseFormat
- :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
- Default value is None.
- :paramtype parallel_tool_calls: bool
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: Must be None in this overload; a default
- ~azure.ai.projects.models.AgentEventHandler is applied.
- :paramtype event_handler: None
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_stream( - self, - thread_id: str, - *, - agent_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: _models.BaseAgentEventHandlerT, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. 
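- # ---------------------------------------------------------------------
- # A streaming sketch for create_stream (hypothetical handler; when
- # event_handler is omitted, a default AgentEventHandler is used, per the
- # implementation further below):
- #
- #     class MyHandler(AgentEventHandler):
- #         def on_thread_run(self, run):
- #             print("run status:", run.status)
- #
- #     with project_client.agents.create_stream(
- #         thread_id=thread.id, agent_id=agent.id, event_handler=MyHandler()
- #     ) as stream:
- #         stream.until_done()
- # ---------------------------------------------------------------------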
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.projects.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
- or ~azure.ai.projects.models.AgentsApiResponseFormat
- :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
- Default value is None.
- :paramtype parallel_tool_calls: bool
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.projects.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_stream(
- self,
- thread_id: str,
- body: Union[JSON, IO[bytes]],
- *,
- include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
- event_handler: None = None,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> _models.AgentRunStream[_models.AgentEventHandler]:
- """Creates a new run for an agent thread.
-
- The stream terminates when the run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: JSON or IO[bytes]
- :keyword include: A list of additional fields to include in the response.
- Currently the only supported value is
- ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
- content. Default value is None.
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
- :keyword event_handler: Must be None in this overload; a default
- ~azure.ai.projects.models.AgentEventHandler is applied.
- :paramtype event_handler: None
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.projects.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_stream(
- self,
- thread_id: str,
- body: Union[JSON, IO[bytes]],
- *,
- event_handler: _models.BaseAgentEventHandlerT,
- include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]:
- """Creates a new run for an agent thread.
-
- The stream terminates when the run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: JSON or IO[bytes]
- :keyword include: A list of additional fields to include in the response.
- Currently the only supported value is
- ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
- content. Default value is None.
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.projects.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def create_stream( # pyright: ignore[reportInconsistentOverload]
- self,
- thread_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
- agent_id: str = _Unset,
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- parallel_tool_calls: Optional[bool] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.BaseAgentEventHandlerT] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]:
- """Creates a new run for an agent thread.
-
- The stream terminates when the run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword include: A list of additional fields to include in the response.
- Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif agent_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - include=include, - agent_id=agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - if not event_handler: - event_handler = cast(_models.BaseAgentEventHandlerT, _models.AgentEventHandler()) - return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - # pylint: disable=arguments-differ - @overload - def submit_tool_outputs_to_run( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
- Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs_to_run( - thread_id, - run_id, - tool_outputs=tool_outputs, - stream_parameter=False, - stream=False, - **kwargs, - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return response - - @overload - def submit_tool_outputs_to_stream( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]], - *, - event_handler: _models.BaseAgentEventHandler, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_stream( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: _models.BaseAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: _models.BaseAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. 
-    @overload
-    def submit_tool_outputs_to_stream(
-        self,
-        thread_id: str,
-        run_id: str,
-        body: Union[JSON, IO[bytes]],
-        *,
-        event_handler: _models.BaseAgentEventHandler,
-        content_type: str = "application/json",
-        **kwargs: Any,
-    ) -> None:
-        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
-        outputs will have a status of 'requires_action' with a required_action.type of
-        'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a
-        ``data: [DONE]`` message.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Is either a JSON type or a IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword event_handler: The event handler to use for processing events during the run.
-        :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    def submit_tool_outputs_to_stream(
-        self,
-        thread_id: str,
-        run_id: str,
-        *,
-        tool_outputs: List[_models.ToolOutput],
-        content_type: str = "application/json",
-        event_handler: _models.BaseAgentEventHandler,
-        **kwargs: Any,
-    ) -> None:
-        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
-        outputs will have a status of 'requires_action' with a required_action.type of
-        'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a
-        ``data: [DONE]`` message.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :keyword tool_outputs: Required.
-        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword event_handler: The event handler to use for processing events during the run.
-        :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace
-    def submit_tool_outputs_to_stream(  # pyright: ignore[reportInconsistentOverload]
-        self,
-        thread_id: str,
-        run_id: str,
-        body: Union[JSON, IO[bytes]] = _Unset,
-        *,
-        tool_outputs: List[_models.ToolOutput] = _Unset,
-        event_handler: _models.BaseAgentEventHandler,
-        **kwargs: Any,
-    ) -> None:
-        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
-        outputs will have a status of 'requires_action' with a required_action.type of
-        'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a
-        ``data: [DONE]`` message.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Is either a JSON type or a IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword tool_outputs: Required.
-        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
-        :keyword event_handler: The event handler to use for processing events during the run.
-        :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if isinstance(body, dict):
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
-        elif tool_outputs is not _Unset:
-            response = super().submit_tool_outputs_to_run(
-                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
-            )
-
-        elif isinstance(body, io.IOBase):
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
-        else:
-            raise ValueError("Invalid combination of arguments provided.")
-
-        # Cast the response to Iterator[bytes] for type correctness
-        response_iterator: Iterator[bytes] = cast(Iterator[bytes], response)
-
-        event_handler.initialize(response_iterator, self._handle_submit_tool_outputs)
-
-    def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _models.BaseAgentEventHandler) -> None:
-        if isinstance(run.required_action, _models.SubmitToolOutputsAction):
-            tool_calls = run.required_action.submit_tool_outputs.tool_calls
-            if not tool_calls:
-                logger.debug("No tool calls to execute.")
-                return
-
-            # We only need the tool set when executing a local function. If the tool
-            # is an azure_function, we just wait for it to finish.
-            if (
-                any(tool_call.type == "function" for tool_call in tool_calls)
-                and len(self._function_tool.definitions) > 0
-            ):
-
-                toolset = _models.ToolSet()
-                toolset.add(self._function_tool)
-                tool_outputs = toolset.execute_tool_calls(tool_calls)
-
-                logger.info("Tool outputs: %s", tool_outputs)
-                if tool_outputs:
-                    self.submit_tool_outputs_to_stream(
-                        thread_id=run.thread_id,
-                        run_id=run.id,
-                        tool_outputs=tool_outputs,
-                        event_handler=event_handler,
-                    )
-
-    # pylint: disable=arguments-differ
-    @overload
-    def upload_file(  # pylint: disable=arguments-differ
-        self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any
-    ) -> _models.OpenAIFile:
-        """Uploads a file for use by other operations.
-
-        :keyword file_path: Required.
-        :type file_path: str
-        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
-         "assistants_output", "batch", "batch_output", and "vision". Required.
-        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
-        :return: OpenAIFile.
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. 
- """ - # If a JSON body is provided directly, pass it along - if body is not None: - return super()._upload_file(body=body, **kwargs) - - # Convert FilePurpose enum to string if necessary - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - # If file content is passed in directly - if file is not None and purpose is not None: - return super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) - - # If a file path is provided - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # If no explicit filename is provided, use the base name - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}") from e - - raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - - @overload - def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file_and_poll( - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - uploaded_file = self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." - ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - time.sleep(sleep_interval) - uploaded_file = self.get_file(uploaded_file.id) - - return uploaded_file - - @overload - def create_vector_store_and_poll( - self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. 
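As a quick usage sketch of the polling helper above (the `project_client` handle and local path are illustrative assumptions, not part of this patch):

# Upload a local file and block until it leaves the uploaded/pending/running states.
uploaded = project_client.agents.upload_file_and_poll(
    file_path="./data/notes.md",  # illustrative local path
    purpose="assistants",
    sleep_interval=2,  # poll every 2 seconds instead of the default 1
)
print(uploaded.id, uploaded.status)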
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_and_poll( - self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_and_poll( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store = super().create_vector_store( - body=body, content_type=content_type or "application/json", **kwargs - ) - elif isinstance(body, io.IOBase): - vector_store = super().create_vector_store(body=body, content_type=content_type, **kwargs) - else: - raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") - else: - store_configuration = None - if data_sources: - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) - - vector_store = super().create_vector_store( - file_ids=file_ids, - store_configuration=store_configuration, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - - while vector_store.status == "in_progress": - time.sleep(sleep_interval) - vector_store = super().get_vector_store(vector_store.id) - - return vector_store - - @overload - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. 
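A short sketch of the happy path for the helper above, reusing a hypothetical file id from a prior upload (assumed names, not part of this patch):

vector_store = project_client.agents.create_vector_store_and_poll(
    file_ids=[uploaded.id],  # from a prior upload; illustrative
    name="notes-store",
)
# The call only returns after the store has left the "in_progress" state.
print(vector_store.id, vector_store.status)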
The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: IO[bytes],
-        *,
-        content_type: str = "application/json",
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Create a vector store file batch and poll.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace
-    def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: Union[JSON, IO[bytes]] = _Unset,
-        *,
-        file_ids: Optional[List[str]] = None,
-        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        content_type: str = "application/json",
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Create a vector store file batch and poll.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Is either a JSON type or a IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword file_ids: List of file identifiers. Required.
-        :paramtype file_ids: list[str]
-        :keyword data_sources: List of Azure assets. Default value is None.
-        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
-        :keyword content_type: Body parameter content-type. Defaults to "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if body is not _Unset:
-            if isinstance(body, dict):
-                vector_store_file_batch = super().create_vector_store_file_batch(
-                    vector_store_id=vector_store_id,
-                    body=body,
-                    content_type=content_type or "application/json",
-                    **kwargs,
-                )
-            elif isinstance(body, io.IOBase):
-                vector_store_file_batch = super().create_vector_store_file_batch(
-                    vector_store_id=vector_store_id,
-                    body=body,
-                    content_type=content_type,
-                    **kwargs,
-                )
-            else:
-                raise ValueError("Invalid type for 'body'.
Must be a dict (JSON) or file-like (IO[bytes]).") - else: - vector_store_file_batch = super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - file_ids=file_ids, - data_sources=data_sources, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file_batch.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file_batch = super().get_vector_store_file_batch( - vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id - ) - - return vector_store_file_batch - - @distributed_trace - def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: - """ - Returns file content as byte stream for given file_id. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: An iterator that yields bytes from the file content. - :rtype: Iterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. - """ - kwargs["stream"] = True - response = super()._get_file_content(file_id, **kwargs) - return cast(Iterator[bytes], response) - - @distributed_trace - def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: - """ - Synchronously saves file content retrieved using a file identifier to the specified local directory. - - :param file_id: The unique identifier for the file to retrieve. - :type file_id: str - :param file_name: The name of the file to be saved. - :type file_name: str - :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: Optional[Union[str, Path]] - :raises ValueError: If the target path is not a directory or the file name is invalid. - :raises RuntimeError: If file content retrieval fails or no content is found. - :raises TypeError: If retrieved chunks are not bytes-like objects. - :raises IOError: If writing to the file fails. - """ - try: - # Determine and validate the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - if not path.is_dir(): - raise ValueError(f"The target path '{path}' is not a directory.") - - # Sanitize and validate the file name - sanitized_file_name = Path(file_name).name - if not sanitized_file_name: - raise ValueError("The provided file name is invalid.") - - # Retrieve the file content - file_content_stream = self.get_file_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - target_file_path = path / sanitized_file_name - - # Write the file content to disk - with target_file_path.open("wb") as file: - for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - file.write(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) - - except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error("An error occurred in save_file: %s", e) - raise - - @overload - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. 
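The two retrieval paths above can be exercised as follows (the `project_client` handle and file id are hypothetical, not part of this patch):

# Stream a generated file to disk.
project_client.agents.save_file(
    file_id="assistant-file-123",  # hypothetical identifier
    file_name="report.csv",
    target_dir="./downloads",
)

# Or consume the raw byte stream directly:
data = b"".join(project_client.agents.get_file_content("assistant-file-123"))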
- :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. 
- - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file = super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file = super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") - else: - vector_store_file = super().create_vector_store_file( - vector_store_id=vector_store_id, - file_id=file_id, - data_source=data_source, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file = super().get_vector_store_file( - vector_store_id=vector_store_id, file_id=vector_store_file.id - ) - - return vector_store_file - - @distributed_trace - def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - return super().delete_agent(agent_id, **kwargs) - - @overload - def enable_auto_function_calls(self, *, functions: Set[Callable[..., Any]]) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. - If this is not set, functions must be called manually. - :keyword functions: A set of callable functions to be used as tools. - :type functions: Set[Callable[..., Any]] - """ - - @overload - def enable_auto_function_calls(self, *, function_tool: _models.FunctionTool) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. - If this is not set, functions must be called manually. - :keyword function_tool: A FunctionTool object representing the tool to be used. - :type function_tool: Optional[_models.FunctionTool] - """ - - @overload - def enable_auto_function_calls(self, *, toolset: _models.ToolSet) -> None: - """Enables tool calls to be executed automatically during create_and_process_run or streaming. 
-        If this is not set, functions must be called manually.
-        :keyword toolset: A ToolSet object representing the set of tools to be used.
-        :type toolset: Optional[_models.ToolSet]
-        """
-
-    @distributed_trace
-    def enable_auto_function_calls(
-        self,
-        *,
-        functions: Optional[Set[Callable[..., Any]]] = None,
-        function_tool: Optional[_models.FunctionTool] = None,
-        toolset: Optional[_models.ToolSet] = None,
-    ) -> None:
-        """Enables tool calls to be executed automatically during create_and_process_run or streaming.
-        If this is not set, functions must be called manually.
-        :keyword functions: A set of callable functions to be used as tools.
-        :type functions: Set[Callable[..., Any]]
-        :keyword function_tool: A FunctionTool object representing the tool to be used.
-        :type function_tool: Optional[_models.FunctionTool]
-        :keyword toolset: A ToolSet object representing the set of tools to be used.
-        :type toolset: Optional[_models.ToolSet]
-        """
-        if functions:
-            self._function_tool = _models.FunctionTool(functions)
-        elif function_tool:
-            self._function_tool = function_tool
-        elif toolset:
-            tool = toolset.get_tool(_models.FunctionTool)
-            self._function_tool = tool
-
-
-__all__: List[str] = [
-    "AgentsOperations",
-    "ConnectionsOperations",
-    "TelemetryOperations",
-    "InferenceOperations",
-]  # Add all objects you want publicly available to users at this package level
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
 
 
 def patch_sdk():
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/__init__.py
deleted file mode 100644
index f1e98bf1be1a..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-# pylint: disable=unused-import
-try:
-    import prompty  # pylint: disable=unused-import
-except ImportError as exc:
-    raise ImportError(
-        "The 'prompty' package is required to use the 'azure.ai.projects.prompts' module. "
-        "Please install it by running 'pip install prompty'."
-    ) from exc
-
-from ._patch import patch_sdk as _patch_sdk, PromptTemplate
-
-_patch_sdk()
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_patch.py
deleted file mode 100644
index 13fd07bcac99..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_patch.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# pylint: disable=line-too-long,useless-suppression
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-# pylint: disable=line-too-long,R,no-member
-"""Customize generated code here.
-
-Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
-"""
-
-import traceback
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-from typing_extensions import Self
-from prompty import headless, load, prepare
-from prompty.core import Prompty
-from ._utils import remove_leading_empty_space
-
-
-class PromptTemplate:
-    """The helper class which takes a variety of inputs, e.g. Prompty format or string, and returns the parsed prompt in an array.
-    The 'prompty' library must be installed to use this class.
- """ - - @classmethod - def from_prompty(cls, file_path: str) -> Self: - """Initialize a PromptTemplate object from a prompty file. - - :param file_path: The path to the prompty file. - :type file_path: str - :return: The PromptTemplate object. - :rtype: PromptTemplate - """ - if not file_path: - raise ValueError("Please provide file_path") - - # Get the absolute path of the file by `traceback.extract_stack()`, it's "-2" because: - # In the stack, the last function is the current function. - # The second last function is the caller function, which is the root of the file_path. - stack = traceback.extract_stack() - caller = Path(stack[-2].filename) - abs_file_path = Path(caller.parent / Path(file_path)).resolve().absolute() - - prompty = load(str(abs_file_path)) - prompty.template.type = "mustache" # For Azure, default to mustache instead of Jinja2 - return cls(prompty=prompty) - - @classmethod - def from_string(cls, prompt_template: str, api: str = "chat", model_name: Optional[str] = None) -> Self: - """Initialize a PromptTemplate object from a message template. - - :param prompt_template: The prompt template string. - :type prompt_template: str - :param api: The API type, e.g. "chat" or "completion". - :type api: str - :param model_name: The model name, e.g. "gpt-4o-mini". - :type model_name: str - :return: The PromptTemplate object. - :rtype: PromptTemplate - """ - prompt_template = remove_leading_empty_space(prompt_template) - prompty = headless(api=api, content=prompt_template) - prompty.template.type = "mustache" # For Azure, default to mustache instead of Jinja2 - prompty.template.parser = "prompty" - return cls( - api=api, - model_name=model_name, - prompty=prompty, - ) - - def __init__( - self, - *, - api: str = "chat", - prompty: Optional[Prompty] = None, - prompt_template: Optional[str] = None, - model_name: Optional[str] = None, - ) -> None: - self.prompty = prompty - if self.prompty is not None: - self.model_name = ( - self.prompty.model.configuration["azure_deployment"] - if "azure_deployment" in self.prompty.model.configuration - else None - ) - self.parameters = self.prompty.model.parameters - self._config = {} - elif prompt_template is not None: - self.model_name = model_name - self.parameters = {} - # _config is a dict to hold the internal configuration - self._config = { - "api": api if api is not None else "chat", - "prompt_template": prompt_template, - } - else: - raise ValueError("Please pass valid arguments for PromptTemplate") - - def create_messages(self, data: Optional[Dict[str, Any]] = None, **kwargs) -> List[Dict[str, Any]]: - """Render the prompt template with the given data. - - :param data: The data to render the prompt template with. - :type data: Optional[Dict[str, Any]] - :return: The rendered prompt template. - :rtype: List[Dict[str, Any]] - """ - if data is None: - data = kwargs - - if self.prompty is not None: - parsed = prepare(self.prompty, data) - return parsed # type: ignore - else: - raise ValueError("Please provide valid prompt template") - - -def patch_sdk(): - """Do not remove from this file. 
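For context on the class being deleted, a minimal sketch of its intended use, assuming the 'prompty' package and this module were still installed (the template text and variable names are illustrative):

from azure.ai.projects.prompts import PromptTemplate

template = PromptTemplate.from_string(
    api="chat",
    prompt_template="""
        system:
        You are a helpful assistant. The user's name is {{name}}.
        user:
        {{question}}
        """,
)
# Mustache variables are substituted when the messages are rendered.
messages = template.create_messages(name="Ada", question="What is 2 + 2?")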
-
-    `patch_sdk` is a last resort escape hatch that allows you to do customizations
-    you can't accomplish using the techniques described in
-    https://aka.ms/azsdk/python/dpcodegen/python/customize
-    """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_utils.py
deleted file mode 100644
index a85e193322e5..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/prompts/_utils.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-import sys
-
-
-def remove_leading_empty_space(multiline_str: str) -> str:
-    """
-    Processes a multiline string by:
-    1. Removing leading empty lines
-    2. Finding the minimum number of leading spaces
-    3. Removing that common indentation from every remaining line
-
-    :param multiline_str: The input multiline string.
-    :type multiline_str: str
-    :return: The processed multiline string.
-    :rtype: str
-    """
-    lines = multiline_str.splitlines()
-    start_index = 0
-    while start_index < len(lines) and lines[start_index].strip() == "":
-        start_index += 1
-
-    # Find the minimum number of leading spaces
-    min_spaces = sys.maxsize
-    for line in lines[start_index:]:
-        if len(line.strip()) == 0:
-            continue
-        spaces = len(line) - len(line.lstrip())
-        spaces += line.lstrip().count("\t") * 2  # Count tabs as 2 spaces
-        min_spaces = min(min_spaces, spaces)
-
-    # Strip the common leading whitespace from each remaining line
-    processed_lines = []
-    for line in lines[start_index:]:
-        processed_lines.append(line[min_spaces:])
-
-    return "\n".join(processed_lines)
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py
deleted file mode 100644
index a5e9e67bf233..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from ._trace_function import trace_function
-
-__all__ = [
-    "trace_function",
-]
-__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py
deleted file mode 100644
index 7a1284e88af5..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------ -import functools -import asyncio # pylint: disable=do-not-import-asyncio -from typing import Any, Callable, Optional, Dict - -try: - # pylint: disable = no-name-in-module - from opentelemetry import trace as opentelemetry_trace - - tracer = opentelemetry_trace.get_tracer(__name__) - _tracing_library_available = True -except ModuleNotFoundError: - _tracing_library_available = False - -if _tracing_library_available: - - def trace_function(span_name: Optional[str] = None): - """ - A decorator for tracing function calls using OpenTelemetry. - - This decorator handles various data types for function parameters and return values, - and records them as attributes in the trace span. The supported data types include: - - Basic data types: str, int, float, bool - - Collections: list, dict, tuple, set - - Special handling for collections: - - If a collection (list, dict, tuple, set) contains nested collections, the entire collection - is converted to a string before being recorded as an attribute. - - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. - - Object types are omitted, and the corresponding parameter is not traced. - - :param span_name: The name of the span. If not provided, the function name is used. - :type span_name: Optional[str] - :return: The decorated function with tracing enabled. - :rtype: Callable - """ - - def decorator(func: Callable) -> Callable: - @functools.wraps(func) - async def async_wrapper(*args: Any, **kwargs: Any) -> Any: - """ - Wrapper function for asynchronous functions. - - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: The result of the decorated asynchronous function. - :rtype: Any - """ - name = span_name if span_name else func.__name__ - with tracer.start_as_current_span(name) as span: - try: - # Sanitize parameters and set them as attributes - sanitized_params = sanitize_parameters(func, *args, **kwargs) - span.set_attributes(sanitized_params) - result = await func(*args, **kwargs) - sanitized_result = sanitize_for_attributes(result) - if sanitized_result is not None: - if isinstance(sanitized_result, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): - sanitized_result = str(sanitized_result) - span.set_attribute("code.function.return.value", sanitized_result) # type: ignore - return result - except Exception as e: - span.record_exception(e) - span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore - raise - - @functools.wraps(func) - def sync_wrapper(*args: Any, **kwargs: Any) -> Any: - """ - Wrapper function for synchronous functions. - - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: The result of the decorated synchronous function. 
- :rtype: Any - """ - name = span_name if span_name else func.__name__ - with tracer.start_as_current_span(name) as span: - try: - # Sanitize parameters and set them as attributes - sanitized_params = sanitize_parameters(func, *args, **kwargs) - span.set_attributes(sanitized_params) - result = func(*args, **kwargs) - sanitized_result = sanitize_for_attributes(result) - if sanitized_result is not None: - if isinstance(sanitized_result, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): - sanitized_result = str(sanitized_result) - span.set_attribute("code.function.return.value", sanitized_result) # type: ignore - return result - except Exception as e: - span.record_exception(e) - span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore - raise - - # Determine if the function is async - if asyncio.iscoroutinefunction(func): - return async_wrapper - return sync_wrapper - - return decorator - -else: - # Define a no-op decorator if OpenTelemetry is not available - def trace_function(span_name: Optional[str] = None): # pylint: disable=unused-argument - """ - A no-op decorator for tracing function calls when OpenTelemetry is not available. - - :param span_name: Not used in this version. - :type span_name: Optional[str] - :return: The original function. - :rtype: Callable - """ - - def decorator(func: Callable) -> Callable: - return func - - return decorator - - -def sanitize_parameters(func, *args, **kwargs) -> Dict[str, Any]: - """ - Sanitize function parameters to include only built-in data types. - - :param func: The function being decorated. - :type func: Callable - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: A dictionary of sanitized parameters. - :rtype: Dict[str, Any] - """ - import inspect - - params = inspect.signature(func).parameters - sanitized_params = {} - - for i, (name, param) in enumerate(params.items()): - if param.default == inspect.Parameter.empty and i < len(args): - value = args[i] - else: - value = kwargs.get(name, param.default) - - sanitized_value = sanitize_for_attributes(value) - # Check if the collection has nested collections - if isinstance(sanitized_value, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_value): - sanitized_value = str(sanitized_value) - if sanitized_value is not None: - sanitized_params["code.function.parameter." + name] = sanitized_value - - return sanitized_params - - -# pylint: disable=R0911 -def sanitize_for_attributes(value: Any, is_recursive: bool = False) -> Any: - """ - Sanitize a value to be used as an attribute. - - :param value: The value to sanitize. - :type value: Any - :param is_recursive: Indicates if the function is being called recursively. Default is False. - :type is_recursive: bool - :return: The sanitized value or None if the value is not a supported type. 
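A small sketch of the decorator in use. Spans and attributes are only emitted when opentelemetry is installed; otherwise the no-op variant above applies (the sample function is illustrative):

from azure.ai.projects.telemetry import trace_function

@trace_function()  # span name defaults to the function name
def add(a: int, b: int) -> int:
    return a + b

# Records code.function.parameter.a / code.function.parameter.b
# and code.function.return.value on the span.
add(2, 3)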
-    :rtype: Any
-    """
-    if isinstance(value, (str, int, float, bool)):
-        return value
-    if isinstance(value, list):
-        return [
-            sanitize_for_attributes(item, True)
-            for item in value
-            if isinstance(item, (str, int, float, bool, list, dict, tuple, set))
-        ]
-    if isinstance(value, dict):
-        retval = {
-            k: sanitize_for_attributes(v, True)
-            for k, v in value.items()
-            if isinstance(v, (str, int, float, bool, list, dict, tuple, set))
-        }
-        # A dict is not compatible with span attributes, so return it as a string
-        # (except during recursion, where the top-level call does the conversion)
-        if is_recursive:
-            return retval
-        return str(retval)
-    if isinstance(value, tuple):
-        return tuple(
-            sanitize_for_attributes(item, True)
-            for item in value
-            if isinstance(item, (str, int, float, bool, list, dict, tuple, set))
-        )
-    if isinstance(value, set):
-        retval_set = {
-            sanitize_for_attributes(item, True)
-            for item in value
-            if isinstance(item, (str, int, float, bool, list, dict, tuple, set))
-        }
-        if is_recursive:
-            return retval_set
-        return str(retval_set)
-    return None
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py
deleted file mode 100644
index 34fb7e5f7cd8..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from ._ai_agents_instrumentor import AIAgentsInstrumentor
-
-__all__ = [
-    "AIAgentsInstrumentor",
-]
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
deleted file mode 100644
index 2d4dcb1d1b54..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
+++ /dev/null
@@ -1,1907 +0,0 @@
-# pylint: disable=too-many-lines,line-too-long,useless-suppression
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-import copy
-import functools
-import importlib
-import json
-import logging
-import os
-from enum import Enum
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
-from urllib.parse import urlparse
-
-from azure.ai.projects import _types
-from azure.ai.projects.models import AgentRunStream, AsyncAgentRunStream, _models
-from azure.ai.projects.models._enums import AgentsApiResponseFormatMode, MessageRole, RunStepStatus
-from azure.ai.projects.models import (
-    MessageAttachment,
-    MessageDeltaChunk,
-    MessageIncompleteDetails,
-    RequiredFunctionToolCall,
-    RunStep,
-    RunStepDeltaChunk,
-    RunStepFunctionToolCall,
-    RunStepToolCallDetails,
-    SubmitToolOutputsAction,
-    ThreadMessage,
-    ThreadRun,
-    ToolDefinition,
-    ToolOutput,
-    ToolResources,
-)
-from azure.ai.projects.models._patch import AgentEventHandler, AsyncAgentEventHandler, ToolSet
-from azure.ai.projects.telemetry.agents._utils import (
-    AZ_AI_AGENT_SYSTEM,
-    ERROR_TYPE,
-    GEN_AI_AGENT_DESCRIPTION,
-    GEN_AI_AGENT_ID,
-    GEN_AI_AGENT_NAME,
-    GEN_AI_EVENT_CONTENT,
-    GEN_AI_MESSAGE_ID,
-    GEN_AI_MESSAGE_STATUS,
-    GEN_AI_RESPONSE_MODEL,
-    GEN_AI_SYSTEM,
-    GEN_AI_SYSTEM_MESSAGE,
-    GEN_AI_THREAD_ID,
-    GEN_AI_THREAD_RUN_ID,
-    GEN_AI_THREAD_RUN_STATUS,
-    GEN_AI_USAGE_INPUT_TOKENS,
-    GEN_AI_USAGE_OUTPUT_TOKENS,
-    OperationName,
-    start_span,
-)
-from azure.core import CaseInsensitiveEnumMeta  # type: ignore
-from azure.core.settings import settings
-from azure.core.tracing import AbstractSpan
-
-_Unset: Any = object()
-
-try:
-    # pylint: disable = no-name-in-module
-    from opentelemetry.trace import Span, StatusCode
-
-    _tracing_library_available = True
-except ModuleNotFoundError:
-    _tracing_library_available = False
-
-
-__all__ = [
-    "AIAgentsInstrumentor",
-]
-
-
-_agents_traces_enabled: bool = False
-_trace_agents_content: bool = False
-
-
-class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):  # pylint: disable=C4747
-    """An enumeration class to represent different types of traces."""
-
-    AGENTS = "Agents"
-
-
-class AIAgentsInstrumentor:
-    """
-    A class for managing the trace instrumentation of AI Agents.
-
-    This class allows enabling or disabling tracing for AI Agents and provides
-    functionality to check whether instrumentation is active.
-    """
-
-    def __init__(self):
-        if not _tracing_library_available:
-            raise ModuleNotFoundError(
-                "Azure Core Tracing OpenTelemetry is not installed. "
-                "Please install it using 'pip install azure-core-tracing-opentelemetry'"
-            )
-        # In the future we could support different versions from the same library
-        # and have a parameter that specifies the version to use.
-        self._impl = _AIAgentsInstrumentorPreview()
-
-    def instrument(self, enable_content_recording: Optional[bool] = None) -> None:
-        """
-        Enable trace instrumentation for AI Agents.
-
-        :param enable_content_recording: Whether content recording is enabled as part
-         of the traces or not. Content in this context refers to chat message content
-         and function call tool related function names, function parameter names and
-         values. True will enable content recording, False will disable it. If no value
-         is provided, then the value read from environment variable
-         AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable
-         is not found, then the value will default to False.
-         Note that each call to instrument applies the content recording value
-         provided with that most recent call (falling back to the environment
-         variable when no value is given, and to False when the environment
-         variable is not found), even if instrument was called previously
-         without uninstrument being called in between.
-        :type enable_content_recording: bool, optional
-
-        """
-        self._impl.instrument(enable_content_recording)
-
-    def uninstrument(self) -> None:
-        """
-        Remove trace instrumentation for AI Agents.
-
-        This method removes any active instrumentation, stopping the tracing
-        of AI Agents.
-        """
-        self._impl.uninstrument()
-
-    def is_instrumented(self) -> bool:
-        """
-        Check if trace instrumentation for AI Agents is currently enabled.
-
-        :return: True if instrumentation is active, False otherwise.
-        :rtype: bool
-        """
-        return self._impl.is_instrumented()
-
-    def is_content_recording_enabled(self) -> bool:
-        """This function gets the content recording value.
-
-        :return: A bool value indicating whether content recording is enabled.
-        :rtype: bool
-        """
-        return self._impl.is_content_recording_enabled()
-
-
-class _AIAgentsInstrumentorPreview:
-    # pylint: disable=R0904
-    """
-    A class for managing the trace instrumentation of AI Agents.
-
-    This class allows enabling or disabling tracing for AI Agents and provides
-    functionality to check whether instrumentation is active.
-    """
-
-    def _str_to_bool(self, s):
-        if s is None:
-            return False
-        return str(s).lower() == "true"
-
-    def instrument(self, enable_content_recording: Optional[bool] = None):
-        """
-        Enable trace instrumentation for AI Agents.
-
-        :param enable_content_recording: Whether content recording is enabled as part
-         of the traces or not. Content in this context refers to chat message content
-         and function call tool related function names, function parameter names and
-         values. True will enable content recording, False will disable it. If no value
-         is provided, then the value read from environment variable
-         AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable
-         is not found, then the value will default to False.
-
-        :type enable_content_recording: bool, optional
-        """
-        if enable_content_recording is None:
-            var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED")
-            enable_content_recording = self._str_to_bool(var_value)
-        if not self.is_instrumented():
-            self._instrument_agents(enable_content_recording)
-        else:
-            self._set_enable_content_recording(enable_content_recording=enable_content_recording)
-
-    def uninstrument(self):
-        """
-        Disable trace instrumentation for AI Agents.
-
-        This method removes any active instrumentation, stopping the tracing
-        of AI Agents.
-        """
-        if self.is_instrumented():
-            self._uninstrument_agents()
-
-    def is_instrumented(self):
-        """
-        Check if trace instrumentation for AI Agents is currently enabled.
-
-        :return: True if instrumentation is active, False otherwise.
-        :rtype: bool
-        """
-        return self._is_instrumented()
-
-    def set_enable_content_recording(self, enable_content_recording: bool = False) -> None:
-        """This function sets the content recording value.
-
-        :param enable_content_recording: Indicates whether tracing of message content should be enabled.
-         This also controls whether function call tool function names,
-         parameter names and parameter values are traced.
-        :type enable_content_recording: bool
-        """
-        self._set_enable_content_recording(enable_content_recording=enable_content_recording)
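A minimal sketch of the instrumentor lifecycle described above (assumes azure-core-tracing-opentelemetry is installed; the surrounding agent calls are elided):

from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor

instrumentor = AIAgentsInstrumentor()
instrumentor.instrument(enable_content_recording=True)
assert instrumentor.is_content_recording_enabled()
# ... run agent operations; spans and gen_ai.* message events are emitted ...
instrumentor.uninstrument()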
- :type enable_content_recording: bool - """ - self._set_enable_content_recording(enable_content_recording=enable_content_recording) - - def is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content tracing is enabled. - :rtype bool - """ - return self._is_content_recording_enabled() - - def _set_attributes(self, span: "AbstractSpan", *attrs: Tuple[str, Any]) -> None: - for attr in attrs: - key, value = attr - if value is not None: - span.add_attribute(key, value) - - def _parse_url(self, url): - parsed = urlparse(url) - server_address = parsed.hostname - port = parsed.port - return server_address, port - - def _remove_function_call_names_and_arguments(self, tool_calls: list) -> list: - tool_calls_copy = copy.deepcopy(tool_calls) - for tool_call in tool_calls_copy: - if "function" in tool_call: - if "name" in tool_call["function"]: - del tool_call["function"]["name"] - if "arguments" in tool_call["function"]: - del tool_call["function"]["arguments"] - if not tool_call["function"]: - del tool_call["function"] - return tool_calls_copy - - def _create_event_attributes( - self, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - thread_run_id: Optional[str] = None, - message_id: Optional[str] = None, - message_status: Optional[str] = None, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> Dict[str, Any]: - attrs: Dict[str, Any] = {GEN_AI_SYSTEM: AZ_AI_AGENT_SYSTEM} - if thread_id: - attrs[GEN_AI_THREAD_ID] = thread_id - - if agent_id: - attrs[GEN_AI_AGENT_ID] = agent_id - - if thread_run_id: - attrs[GEN_AI_THREAD_RUN_ID] = thread_run_id - - if message_id: - attrs[GEN_AI_MESSAGE_ID] = message_id - - if message_status: - attrs[GEN_AI_MESSAGE_STATUS] = self._status_to_string(message_status) - - if usage: - attrs[GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens - attrs[GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens - - return attrs - - def add_thread_message_event( - self, span, message: ThreadMessage, usage: Optional[_models.RunStepCompletionUsage] = None - ) -> None: - content_body = {} - if _trace_agents_content: - for content in message.content: - typed_content = content.get(content.type, None) - if typed_content: - content_details = {"value": self._get_field(typed_content, "value")} - annotations = self._get_field(typed_content, "annotations") - if annotations: - content_details["annotations"] = [a.as_dict() for a in annotations] - content_body[content.type] = content_details - - self._add_message_event( - span, - self._get_role(message.role), - content_body, - attachments=message.attachments, - thread_id=message.thread_id, - agent_id=message.agent_id, - message_id=message.id, - thread_run_id=message.run_id, - message_status=message.status, - incomplete_details=message.incomplete_details, - usage=usage, - ) - - def _add_message_event( - self, - span, - role: str, - content: Any, - attachments: Any = None, # Optional[List[MessageAttachment]] or dict - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - message_id: Optional[str] = None, - thread_run_id: Optional[str] = None, - message_status: Optional[str] = None, - incomplete_details: Optional[MessageIncompleteDetails] = None, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> None: - # TODO document new fields - - event_body = {} - if _trace_agents_content: - event_body["content"] = content - if attachments: - event_body["attachments"] = [] - for attachment in attachments: - 
attachment_body = {"id": attachment.file_id} - if attachment.tools: - attachment_body["tools"] = [self._get_field(tool, "type") for tool in attachment.tools] - event_body["attachments"].append(attachment_body) - - if incomplete_details: - event_body["incomplete_details"] = incomplete_details - event_body["role"] = role - - attributes = self._create_event_attributes( - thread_id=thread_id, - agent_id=agent_id, - thread_run_id=thread_run_id, - message_id=message_id, - message_status=message_status, - usage=usage, - ) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) - span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) - - def _get_field(self, obj: Any, field: str) -> Any: - if not obj: - return None - - if isinstance(obj, dict): - return obj.get(field, None) - - return getattr(obj, field, None) - - def _add_instructions_event( - self, - span: "AbstractSpan", - instructions: Optional[str], - additional_instructions: Optional[str], - agent_id: Optional[str] = None, - thread_id: Optional[str] = None, - ) -> None: - if not instructions: - return - - event_body: Dict[str, Any] = {} - if _trace_agents_content and (instructions or additional_instructions): - if instructions and additional_instructions: - event_body["content"] = f"{instructions} {additional_instructions}" - else: - event_body["content"] = instructions or additional_instructions - - attributes = self._create_event_attributes(agent_id=agent_id, thread_id=thread_id) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) - span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) - - def _get_role(self, role: Optional[Union[str, MessageRole]]) -> str: - if role is None or role is _Unset: - return "user" - - if isinstance(role, MessageRole): - return role.value - - return role - - def _status_to_string(self, status: Any) -> str: - return status.value if hasattr(status, "value") else status - - def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: - tool_calls = [ - { - "id": t.id, - "type": t.type, - "function": ( - {"name": t.function.name, "arguments": json.loads(t.function.arguments)} - if isinstance(t, RunStepFunctionToolCall) - else None - ), - } - for t in cast(RunStepToolCallDetails, step.step_details).tool_calls - ] - - attributes = self._create_event_attributes( - thread_id=step.thread_id, - agent_id=step.agent_id, - thread_run_id=step.run_id, - message_status=step.status, - usage=step.usage, - ) - - if _trace_agents_content: - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}, ensure_ascii=False) - else: - tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}, ensure_ascii=False) - span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) - - def _add_tool_event_from_thread_run(self, span, run: ThreadRun) -> None: - tool_calls = [] - - for t in run.required_action.submit_tool_outputs.tool_calls: # type: ignore - try: - parsed_arguments = json.loads(t.function.arguments) - except json.JSONDecodeError: - parsed_arguments = {} - - tool_call = { - "id": t.id, - "type": t.type, - "function": ( - {"name": t.function.name, "arguments": parsed_arguments} - if isinstance(t, RequiredFunctionToolCall) - else None - ), - } - tool_calls.append(tool_call) - - attributes = self._create_event_attributes( - thread_id=run.thread_id, - 
agent_id=run.agent_id, - thread_run_id=run.id, - message_status=run.status, - ) - - if _trace_agents_content: - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) - else: - tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}) - span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) - - def set_end_run(self, span: "AbstractSpan", run: Optional[ThreadRun]) -> None: - if run and span and span.span_instance.is_recording: - span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(run.status)) - span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) - if run and run.usage: - span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) - span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) - - @staticmethod - def agent_api_response_to_str(response_format: Any) -> Optional[str]: - """ - Convert response_format to string. - - :param response_format: The response format. - :type response_format: ~azure.ai.projects._types.AgentsApiResponseFormatOption - :returns: string for the response_format. - :rtype: Optional[str] - :raises: Value error if response_format is not of type AgentsApiResponseFormatOption. - """ - if isinstance(response_format, str) or response_format is None: - return response_format - if isinstance(response_format, AgentsApiResponseFormatMode): - return response_format.value - if isinstance(response_format, _models.AgentsApiResponseFormat): - return response_format.type - if isinstance(response_format, _models.ResponseFormatJsonSchemaType): - return response_format.type - raise ValueError(f"Unknown response format {type(response_format)}") - - def start_thread_run_span( - self, - operation_name: OperationName, - project_name: str, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[ThreadMessage]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - _tools: Optional[List[ToolDefinition]] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> "Optional[AbstractSpan]": - span = start_span( - operation_name, - project_name, - thread_id=thread_id, - agent_id=agent_id, - model=model, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), - ) - if span and span.span_instance.is_recording and instructions and additional_instructions: - self._add_instructions_event( - span, instructions, additional_instructions, thread_id=thread_id, agent_id=agent_id - ) - - if additional_messages: - for message in additional_messages: - self.add_thread_message_event(span, message) - return span - - def start_submit_tool_outputs_span( - self, - project_name: str, - thread_id: Optional[str] = None, - run_id: Optional[str] = None, - tool_outputs: Optional[List[ToolOutput]] = None, - event_handler: Optional[Union[AgentEventHandler, AsyncAgentEventHandler]] = None, - ) -> "Optional[AbstractSpan]": - run_span = event_handler.span if isinstance(event_handler, _AgentEventHandlerTraceWrapper) else None - if 
run_span is None: - run_span = event_handler.span if isinstance(event_handler, _AsyncAgentEventHandlerTraceWrapper) else None - - if run_span: - recorded = self._add_tool_message_events(run_span, tool_outputs) - else: - recorded = False - - span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, project_name, thread_id=thread_id, run_id=run_id) - if not recorded: - self._add_tool_message_events(span, tool_outputs) - return span - - def _add_tool_message_events( - self, span: "Optional[AbstractSpan]", tool_outputs: Optional[List[ToolOutput]] - ) -> bool: - if span and span.span_instance.is_recording and tool_outputs: - for tool_output in tool_outputs: - if _trace_agents_content: - body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} - else: - body = {"content": "", "id": tool_output["tool_call_id"]} - span.span_instance.add_event( - "gen_ai.tool.message", {"gen_ai.event.content": json.dumps(body, ensure_ascii=False)} - ) - return True - - return False - - def start_create_agent_span( - self, - project_name: str, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - _tools: Optional[List[ToolDefinition]] = None, - _tool_resources: Optional[ToolResources] = None, - _toolset: Optional[ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> "Optional[AbstractSpan]": - span = start_span( - OperationName.CREATE_AGENT, - project_name, - span_name=f"{OperationName.CREATE_AGENT.value} {name}", - model=model, - temperature=temperature, - top_p=top_p, - response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), - ) - if span and span.span_instance.is_recording: - if name: - span.add_attribute(GEN_AI_AGENT_NAME, name) - if description: - span.add_attribute(GEN_AI_AGENT_DESCRIPTION, description) - self._add_instructions_event(span, instructions, None) - - return span - - def start_create_thread_span( - self, - project_name: str, - messages: Optional[List[ThreadMessage]] = None, - _tool_resources: Optional[ToolResources] = None, - ) -> "Optional[AbstractSpan]": - span = start_span(OperationName.CREATE_THREAD, project_name) - if span and span.span_instance.is_recording: - for message in messages or []: - self.add_thread_message_event(span, message) - - return span - - def start_list_messages_span(self, project_name: str, thread_id: Optional[str] = None) -> "Optional[AbstractSpan]": - return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) - - def trace_create_agent(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - name = kwargs.get("name") - model = kwargs.get("model") - description = kwargs.get("description") - instructions = kwargs.get("instructions") - tools = kwargs.get("tools") - tool_resources = kwargs.get("tool_resources") - toolset = kwargs.get("toolset") - temperature = kwargs.get("temperature") - top_p = kwargs.get("top_p") - response_format = kwargs.get("response_format") - - span = self.start_create_agent_span( - project_name=project_name, - name=name, - model=model, - description=description, - instructions=instructions, - _tools=tools, - _tool_resources=tool_resources, - _toolset=toolset, - temperature=temperature, - top_p=top_p, - response_format=response_format, - ) - - if span is None: - return 
function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_AGENT_ID, result.id) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_agent_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - name = kwargs.get("name") - model = kwargs.get("model") - description = kwargs.get("description") - instructions = kwargs.get("instructions") - tools = kwargs.get("tools") - tool_resources = kwargs.get("tool_resources") - toolset = kwargs.get("toolset") - temperature = kwargs.get("temperature") - top_p = kwargs.get("top_p") - response_format = kwargs.get("response_format") - - span = self.start_create_agent_span( - project_name=project_name, - name=name, - model=model, - description=description, - instructions=instructions, - _tools=tools, - _tool_resources=tool_resources, - _toolset=toolset, - temperature=temperature, - top_p=top_p, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_AGENT_ID, result.id) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_thread(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - messages = kwargs.get("messages") - - span = self.start_create_thread_span(project_name=project_name, messages=messages) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_thread_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - messages = 
kwargs.get("messages") - - span = self.start_create_thread_span(project_name=project_name, messages=messages) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_message(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - role = kwargs.get("role") - content = kwargs.get("content") - attachments = kwargs.get("attachments") - - span = self.start_create_message_span( - project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_message_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - role = kwargs.get("role") - content = kwargs.get("content") - attachments = kwargs.get("attachments") - - span = self.start_create_message_span( - project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_run(self, operation_name, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model 
= kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - self.set_end_run(span, result) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_run_async(self, operation_name, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - if span.span_instance.is_recording: - span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(result.status)) - span.add_attribute(GEN_AI_RESPONSE_MODEL, result.model) - if result.usage: - span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens) - span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - 
description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - tool_outputs = kwargs.get("tool_outputs") - event_handler = kwargs.get("event_handler") - - span = self.start_submit_tool_outputs_span( - project_name=project_name, - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - if stream and event_handler: - kwargs["event_handler"] = self.wrap_handler(event_handler, span) - - result = function(*args, **kwargs) - if not isinstance(result, AgentRunStream): - self.set_end_run(span, result) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - tool_outputs = kwargs.get("tool_outputs") - event_handler = kwargs.get("event_handler") - - span = self.start_submit_tool_outputs_span( - project_name=project_name, - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - if stream: - kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) - - result = await function(*args, **kwargs) - if not isinstance(result, AsyncAgentRunStream): - self.set_end_run(span, result) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_handle_submit_tool_outputs(self, function, *args, **kwargs): - event_handler = kwargs.get("event_handler") - if event_handler is None: - event_handler = args[2] - span = getattr(event_handler, "span", None) - - if span is None: - return function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - result = function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: 
ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs): - event_handler = kwargs.get("event_handler") - if event_handler is None: - event_handler = args[2] - span = getattr(event_handler, "span", None) - - if span is None: - return await function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - result = await function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_stream(self, function, *args, **kwargs): - operation_name = OperationName.PROCESS_THREAD_RUN - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - agent_id = kwargs.get("agent_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - event_handler = kwargs.get("event_handler") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - agent_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - kwargs["event_handler"] = self.wrap_handler(event_handler, span) - result = function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_stream_async(self, function, *args, **kwargs): - operation_name = OperationName.PROCESS_THREAD_RUN - project_name = args[ # pylint: 
disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
-            0
-        ]._config.project_name
-        thread_id = kwargs.get("thread_id")
-        agent_id = kwargs.get("agent_id")
-        model = kwargs.get("model")
-        instructions = kwargs.get("instructions")
-        additional_instructions = kwargs.get("additional_instructions")
-        additional_messages = kwargs.get("additional_messages")
-        temperature = kwargs.get("temperature")
-        tools = kwargs.get("tools")
-        top_p = kwargs.get("top_p")
-        max_prompt_tokens = kwargs.get("max_prompt_tokens")
-        max_completion_tokens = kwargs.get("max_completion_tokens")
-        response_format = kwargs.get("response_format")
-        event_handler = kwargs.get("event_handler")
-
-        span = self.start_thread_run_span(
-            operation_name,
-            project_name,
-            thread_id,
-            agent_id,
-            model=model,
-            instructions=instructions,
-            additional_instructions=additional_instructions,
-            additional_messages=additional_messages,
-            temperature=temperature,
-            _tools=tools,
-            top_p=top_p,
-            max_prompt_tokens=max_prompt_tokens,
-            max_completion_tokens=max_completion_tokens,
-            response_format=response_format,
-        )
-
-        if span is None:
-            return await function(*args, **kwargs)
-
-        # TODO: how to keep span active in the current context without exiting?
-        # TODO: dummy span for none
-        with span.change_context(span.span_instance):
-            try:
-                kwargs["event_handler"] = self.wrap_async_handler(event_handler, span)
-                result = await function(*args, **kwargs)
-            except Exception as exc:
-                # Set the span status to error
-                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
-                    span.span_instance.set_status(
-                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
-                        description=str(exc),
-                    )
-                module = getattr(exc, "__module__", "")
-                module = module if module != "builtins" else ""
-                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
-                self._set_attributes(span, ("error.type", error_type))
-                raise
-
-        return result
-
-    def trace_list_messages(self, function, *args, **kwargs):
-        project_name = args[  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
-            0
-        ]._config.project_name
-        thread_id = kwargs.get("thread_id")
-
-        span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id)
-
-        if span is None:
-            return function(*args, **kwargs)
-
-        with span:
-            try:
-                result = function(*args, **kwargs)
-                for message in result.data:
-                    self.add_thread_message_event(span, message)
-
-            except Exception as exc:
-                # Set the span status to error
-                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
-                    span.span_instance.set_status(
-                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
-                        description=str(exc),
-                    )
-                module = getattr(exc, "__module__", "")
-                module = module if module != "builtins" else ""
-                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
-                self._set_attributes(span, ("error.type", error_type))
-                raise
-
-        return result
-
-    async def trace_list_messages_async(self, function, *args, **kwargs):
-        project_name = args[  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
-            0
-        ]._config.project_name
-        thread_id = kwargs.get("thread_id")
-
-        span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id)
-
-        if span is None:
-            return await function(*args, **kwargs)
-
-        with span:
-            try:
-                result = await function(*args, **kwargs)
-                for message in result.data:
self.add_thread_message_event(span, message) - - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def handle_run_stream_exit(self, _function, *args, **kwargs): - agent_run_stream = args[0] - exc_type = kwargs.get("exc_type") - exc_val = kwargs.get("exc_val") - exc_tb = kwargs.get("exc_tb") - # TODO: is it a good idea? - # if not, we'll need to wrap stream and call exit - if ( - agent_run_stream.event_handler - and agent_run_stream.event_handler.__class__.__name__ == "_AgentEventHandlerTraceWrapper" - ): - agent_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) - elif ( - agent_run_stream.event_handler - and agent_run_stream.event_handler.__class__.__name__ == "_AsyncAgentEventHandlerTraceWrapper" - ): - agent_run_stream.event_handler.__aexit__(exc_type, exc_val, exc_tb) - - def wrap_handler( - self, handler: "Optional[AgentEventHandler]" = None, span: "Optional[AbstractSpan]" = None - ) -> "Optional[AgentEventHandler]": - # Do not create a handler wrapper if we do not have handler in the first place. - if not handler: - return None - - if isinstance(handler, _AgentEventHandlerTraceWrapper): - return handler - - if span and span.span_instance.is_recording: - return _AgentEventHandlerTraceWrapper(self, span, handler) - - return handler - - def wrap_async_handler( - self, handler: "Optional[AsyncAgentEventHandler]" = None, span: "Optional[AbstractSpan]" = None - ) -> "Optional[AsyncAgentEventHandler]": - # Do not create a handler wrapper if we do not have handler in the first place. - if not handler: - return None - - if isinstance(handler, _AsyncAgentEventHandlerTraceWrapper): - return handler - - if span and span.span_instance.is_recording: - return _AsyncAgentEventHandlerTraceWrapper(self, span, handler) - - return handler - - def start_create_message_span( - self, - project_name: str, - thread_id: Optional[str] = None, - content: Optional[str] = None, - role: Optional[Union[str, MessageRole]] = None, - attachments: Optional[List[MessageAttachment]] = None, - ) -> "Optional[AbstractSpan]": - role_str = self._get_role(role) - span = start_span(OperationName.CREATE_MESSAGE, project_name, thread_id=thread_id) - if span and span.span_instance.is_recording: - self._add_message_event(span, role_str, content, attachments=attachments, thread_id=thread_id) - return span - - def _trace_sync_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to a synchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. 
- :rtype: Callable - """ - - @functools.wraps(function) - def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - - if class_function_name.startswith("AgentsOperations.create_agent"): - kwargs.setdefault("merge_span", True) - return self.trace_create_agent(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_thread"): - kwargs.setdefault("merge_span", True) - return self.trace_create_thread(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_message"): - kwargs.setdefault("merge_span", True) - return self.trace_create_message(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_run"): - kwargs.setdefault("merge_span", True) - return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_and_process_run"): - kwargs.setdefault("merge_span", True) - return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): - kwargs.setdefault("merge_span", True) - return self.trace_submit_tool_outputs(False, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): - kwargs.setdefault("merge_span", True) - return self.trace_submit_tool_outputs(True, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): - return self.trace_handle_submit_tool_outputs(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_stream"): - kwargs.setdefault("merge_span", True) - return self.trace_create_stream(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.list_messages"): - kwargs.setdefault("merge_span", True) - return self.trace_list_messages(function, *args, **kwargs) - if class_function_name.startswith("AgentRunStream.__exit__"): - return self.handle_run_stream_exit(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _trace_async_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to an asynchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. 
- :rtype: Callable - """ - - @functools.wraps(function) - async def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - - if class_function_name.startswith("AgentsOperations.create_agent"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_agent_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_thread"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_thread_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_message"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_message_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_run"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_and_process_run"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): - kwargs.setdefault("merge_span", True) - return await self.trace_submit_tool_outputs_async(False, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): - kwargs.setdefault("merge_span", True) - return await self.trace_submit_tool_outputs_async(True, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): - return await self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_stream"): - kwargs.setdefault("merge_span", True) - return await self.trace_create_stream_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.list_messages"): - kwargs.setdefault("merge_span", True) - return await self.trace_list_messages_async(function, *args, **kwargs) - if class_function_name.startswith("AsyncAgentRunStream.__aexit__"): - return self.handle_run_stream_exit(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _inject_async(self, f, _trace_type, _name): - wrapper_fun = self._trace_async_function(f) - wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - return wrapper_fun - - def _inject_sync(self, f, _trace_type, _name): - wrapper_fun = self._trace_sync_function(f) - wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - return wrapper_fun - - def _agents_apis(self): - sync_apis = ( - ("azure.ai.projects.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"), - ("azure.ai.projects.operations", "AgentsOperations", "create_thread", TraceType.AGENTS, "thread_create"), - ("azure.ai.projects.operations", "AgentsOperations", "create_message", TraceType.AGENTS, "message_create"), - ("azure.ai.projects.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "create_and_process_run", - TraceType.AGENTS, - 
"create_and_process_run", - ), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "submit_tool_outputs_to_run", - TraceType.AGENTS, - "submit_tool_outputs_to_run", - ), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "submit_tool_outputs_to_stream", - TraceType.AGENTS, - "submit_tool_outputs_to_stream", - ), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "_handle_submit_tool_outputs", - TraceType.AGENTS, - "_handle_submit_tool_outputs", - ), - ("azure.ai.projects.operations", "AgentsOperations", "create_stream", TraceType.AGENTS, "create_stream"), - ("azure.ai.projects.operations", "AgentsOperations", "list_messages", TraceType.AGENTS, "list_messages"), - ("azure.ai.projects.models", "AgentRunStream", "__exit__", TraceType.AGENTS, "__exit__"), - ) - async_apis = ( - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_thread", - TraceType.AGENTS, - "agents_thread_create", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_message", - TraceType.AGENTS, - "agents_thread_message", - ), - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_and_process_run", - TraceType.AGENTS, - "create_and_process_run", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "submit_tool_outputs_to_run", - TraceType.AGENTS, - "submit_tool_outputs_to_run", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "submit_tool_outputs_to_stream", - TraceType.AGENTS, - "submit_tool_outputs_to_stream", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "_handle_submit_tool_outputs", - TraceType.AGENTS, - "_handle_submit_tool_outputs", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_stream", - TraceType.AGENTS, - "create_stream", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "list_messages", - TraceType.AGENTS, - "list_messages", - ), - ("azure.ai.projects.models", "AsyncAgentRunStream", "__aexit__", TraceType.AGENTS, "__aexit__"), - ) - return sync_apis, async_apis - - def _agents_api_list(self): - sync_apis, async_apis = self._agents_apis() - yield sync_apis, self._inject_sync - yield async_apis, self._inject_async - - def _generate_api_and_injector(self, apis): - for api, injector in apis: - for module_name, class_name, method_name, trace_type, name in api: - try: - module = importlib.import_module(module_name) - api = getattr(module, class_name) - if hasattr(api, method_name): - yield api, method_name, trace_type, injector, name - except AttributeError as e: - # Log the attribute exception with the missing class information - logging.warning( - "AttributeError: The module '%s' does not have the class '%s'. %s", - module_name, - class_name, - str(e), - ) - except Exception as e: # pylint: disable=broad-except - # Log other exceptions as a warning, as we are not sure what they might be - logging.warning("An unexpected error occurred: '%s'", str(e)) - - def _available_agents_apis_and_injectors(self): - """ - Generates a sequence of tuples containing Agents API classes, method names, and - corresponding injector functions. - - :return: A generator yielding tuples. 
- :rtype: tuple - """ - yield from self._generate_api_and_injector(self._agents_api_list()) - - def _instrument_agents(self, enable_content_tracing: bool = False): - """This function modifies the methods of the Agents API classes to - inject logic before calling the original methods. - The original methods are stored as _original attributes of the methods. - - :param enable_content_tracing: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. - :type enable_content_tracing: bool - """ - # pylint: disable=W0603 - global _agents_traces_enabled - global _trace_agents_content - if _agents_traces_enabled: - raise RuntimeError("Traces already started for AI Agents") - _agents_traces_enabled = True - _trace_agents_content = enable_content_tracing - for ( - api, - method, - trace_type, - injector, - name, - ) in self._available_agents_apis_and_injectors(): - # Check if the method of the api class has already been modified - if not hasattr(getattr(api, method), "_original"): - setattr(api, method, injector(getattr(api, method), trace_type, name)) - - def _uninstrument_agents(self): - """This function restores the original methods of the Agents API classes - by assigning them back from the _original attributes of the modified methods. - """ - # pylint: disable=W0603 - global _agents_traces_enabled - global _trace_agents_content - _trace_agents_content = False - for api, method, _, _, _ in self._available_agents_apis_and_injectors(): - if hasattr(getattr(api, method), "_original"): - setattr(api, method, getattr(getattr(api, method), "_original")) - _agents_traces_enabled = False - - def _is_instrumented(self): - """This function returns True if Agents API has already been instrumented - for tracing and False if it has not been instrumented. - - :return: A value indicating whether the Agents API is currently instrumented or not. - :rtype: bool - """ - return _agents_traces_enabled - - def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: - """This function sets the content recording value. - - :param enable_content_recording: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. - :type enable_content_recording: bool - """ - global _trace_agents_content # pylint: disable=W0603 - _trace_agents_content = enable_content_recording - - def _is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content tracing is enabled. 
-        :rtype: bool
-        """
-        return _trace_agents_content
-
-
-class _AgentEventHandlerTraceWrapper(AgentEventHandler):
-    def __init__(
-        self,
-        instrumentor: _AIAgentsInstrumentorPreview,
-        span: "AbstractSpan",
-        inner_handler: Optional[AgentEventHandler] = None,
-    ):
-        super().__init__()
-        self.span = span
-        self.inner_handler = inner_handler
-        self.ended = False
-        self.last_run: Optional[ThreadRun] = None
-        self.last_message: Optional[ThreadMessage] = None
-        self.instrumentor = instrumentor
-
-    def initialize(
-        self,
-        response_iterator,
-        submit_tool_outputs,
-    ) -> None:
-        self.submit_tool_outputs = submit_tool_outputs
-        if self.inner_handler:
-            self.inner_handler.initialize(response_iterator=response_iterator, submit_tool_outputs=submit_tool_outputs)
-
-    def __next__(self) -> Any:
-        if self.inner_handler:
-            event_bytes = self.inner_handler.__next_impl__()
-            return self._process_event(event_bytes.decode("utf-8"))
-        return None
-
-    # pylint: disable=R1710
-    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return self.inner_handler.on_message_delta(delta)  # type: ignore
-
-    def on_thread_message(self, message: "ThreadMessage") -> None:  # type: ignore[func-returns-value]
-        retval = None
-        if self.inner_handler:
-            retval = self.inner_handler.on_thread_message(message)  # type: ignore
-
-        if message.status in {"completed", "incomplete"}:
-            self.last_message = message
-
-        return retval  # type: ignore
-
-    def on_thread_run(self, run: "ThreadRun") -> None:  # type: ignore[func-returns-value]
-        retval = None
-
-        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
-            self.instrumentor._add_tool_event_from_thread_run(  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
-                self.span, run
-            )
-
-        if self.inner_handler:
-            retval = self.inner_handler.on_thread_run(run)  # type: ignore
-        self.last_run = run
-
-        return retval  # type: ignore
-
-    def on_run_step(self, step: "RunStep") -> None:  # type: ignore[func-returns-value]
-        retval = None
-        if self.inner_handler:
-            retval = self.inner_handler.on_run_step(step)  # type: ignore
-
-        # todo - report errors for failure statuses here and in run?
-        if step.type == "message_creation" and step.status == RunStepStatus.COMPLETED:
-            self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage)
-            self.last_message = None
-
-        return retval  # type: ignore
-
-    def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return self.inner_handler.on_run_step_delta(delta)  # type: ignore
-
-    def on_error(self, data: str) -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return self.inner_handler.on_error(data)  # type: ignore
-
-    def on_done(self) -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return self.inner_handler.on_done()  # type: ignore
-        # it could be called multiple times (for each step) __exit__
-
-    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return self.inner_handler.on_unhandled_event(event_type, event_data)  # type: ignore
-
-    # pylint: enable=R1710
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        if not self.ended:
-            self.ended = True
-            self.instrumentor.set_end_run(self.span, self.last_run)
-
-            if self.last_run and self.last_run.last_error:
-                self.span.span_instance.set_status(
-                    StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
-                    self.last_run.last_error.message,
-                )
-                self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code)
-
-            self.span.__exit__(exc_type, exc_val, exc_tb)
-            self.span.finish()
-
-
-class _AsyncAgentEventHandlerTraceWrapper(AsyncAgentEventHandler):
-    def __init__(
-        self,
-        instrumentor: _AIAgentsInstrumentorPreview,
-        span: "AbstractSpan",
-        inner_handler: Optional[AsyncAgentEventHandler] = None,
-    ):
-        super().__init__()
-        self.span = span
-        self.inner_handler = inner_handler
-        self.ended = False
-        self.last_run: Optional[ThreadRun] = None
-        self.last_message: Optional[ThreadMessage] = None
-        self.instrumentor = instrumentor
-
-    def initialize(
-        self,
-        response_iterator,
-        submit_tool_outputs,
-    ) -> None:
-        self.submit_tool_outputs = submit_tool_outputs
-        if self.inner_handler:
-            self.inner_handler.initialize(response_iterator=response_iterator, submit_tool_outputs=submit_tool_outputs)
-
-    # cspell:disable-next-line
-    async def __anext__(self) -> Any:
-        if self.inner_handler:
-            # cspell:disable-next-line
-            event_bytes = await self.inner_handler.__anext_impl__()
-            return await self._process_event(event_bytes.decode("utf-8"))
-
-    # pylint: disable=R1710
-    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return await self.inner_handler.on_message_delta(delta)  # type: ignore
-
-    async def on_thread_message(self, message: "ThreadMessage") -> None:  # type: ignore[func-returns-value]
-        retval = None
-        if self.inner_handler:
-            retval = await self.inner_handler.on_thread_message(message)  # type: ignore
-
-        if message.status in {"completed", "incomplete"}:
-            self.last_message = message
-
-        return retval  # type: ignore
-
-    async def on_thread_run(self, run: "ThreadRun") -> None:  # type: ignore[func-returns-value]
-        retval = None
-
-        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
-            self.instrumentor._add_tool_event_from_thread_run(  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
-                self.span, run
-            )
-
-        if self.inner_handler:
-            retval = await self.inner_handler.on_thread_run(run)  # type: ignore
-        self.last_run = run
-
-        return retval  # type: ignore
-
-    async def on_run_step(self, step: "RunStep") -> None:  # type: ignore[func-returns-value]
-        retval = None
-        if self.inner_handler:
-            retval = await self.inner_handler.on_run_step(step)  # type: ignore
-
-        # todo - report errors for failure statuses here and in run?
-        if step.type == "message_creation" and step.status == RunStepStatus.COMPLETED:
-            self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage)
-            self.last_message = None
-
-        return retval  # type: ignore
-
-    async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return await self.inner_handler.on_run_step_delta(delta)  # type: ignore
-
-    async def on_error(self, data: str) -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return await self.inner_handler.on_error(data)  # type: ignore
-
-    async def on_done(self) -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return await self.inner_handler.on_done()  # type: ignore
-        # it could be called multiple times (for each step) __exit__
-
-    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:  # type: ignore[func-returns-value]
-        if self.inner_handler:
-            return await self.inner_handler.on_unhandled_event(event_type, event_data)  # type: ignore
-
-    # pylint: enable=R1710
-
-    def __aexit__(self, exc_type, exc_val, exc_tb):
-        if not self.ended:
-            self.ended = True
-            self.instrumentor.set_end_run(self.span, self.last_run)
-
-            if self.last_run and self.last_run.last_error:
-                self.span.span_instance.set_status(
-                    StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
-                    self.last_run.last_error.message,
-                )
-                self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code)
-
-            self.span.__exit__(exc_type, exc_val, exc_tb)
-            self.span.finish()
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py
deleted file mode 100644
index bdc18e1381e8..000000000000
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from enum import Enum
-from typing import Optional
-
-from azure.core.tracing import AbstractSpan, SpanKind  # type: ignore
-from azure.core.settings import settings  # type: ignore
-
-try:
-    from opentelemetry.trace import StatusCode, Span  # noqa: F401 # pylint: disable=unused-import
-
-    _span_impl_type = settings.tracing_implementation()  # pylint: disable=not-callable
-except ModuleNotFoundError:
-    _span_impl_type = None
-
-
-GEN_AI_MESSAGE_ID = "gen_ai.message.id"
-GEN_AI_MESSAGE_STATUS = "gen_ai.message.status"
-GEN_AI_THREAD_ID = "gen_ai.thread.id"
-GEN_AI_THREAD_RUN_ID = "gen_ai.thread.run.id"
-GEN_AI_AGENT_ID = "gen_ai.agent.id"
-GEN_AI_AGENT_NAME = "gen_ai.agent.name"
-GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description"
-GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
-GEN_AI_THREAD_RUN_STATUS = "gen_ai.thread.run.status"
-GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
-GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
-GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
-GEN_AI_REQUEST_MAX_INPUT_TOKENS = "gen_ai.request.max_input_tokens"
-GEN_AI_REQUEST_MAX_OUTPUT_TOKENS = "gen_ai.request.max_output_tokens"
-GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
-GEN_AI_SYSTEM = "gen_ai.system"
-SERVER_ADDRESS = "server.address"
-AZ_AI_AGENT_SYSTEM = "az.ai.agents"
-GEN_AI_TOOL_NAME = "gen_ai.tool.name"
-GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id"
-GEN_AI_REQUEST_RESPONSE_FORMAT = "gen_ai.request.response_format"
-GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
-GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
-GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message"
-GEN_AI_EVENT_CONTENT = "gen_ai.event.content"
-ERROR_TYPE = "error.type"
-
-
-class OperationName(Enum):
-    CREATE_AGENT = "create_agent"
-    CREATE_THREAD = "create_thread"
-    CREATE_MESSAGE = "create_message"
-    START_THREAD_RUN = "start_thread_run"
-    EXECUTE_TOOL = "execute_tool"
-    LIST_MESSAGES = "list_messages"
-    SUBMIT_TOOL_OUTPUTS = "submit_tool_outputs"
-    PROCESS_THREAD_RUN = "process_thread_run"
-
-
-def trace_tool_execution(
-    tool_call_id: str,
-    tool_name: str,
-    thread_id: Optional[str] = None,  # TODO: would be nice to have this, but need to propagate somehow
-    agent_id: Optional[str] = None,  # TODO: would be nice to have this, but need to propagate somehow
-    run_id: Optional[str] = None,  # TODO: would be nice to have this, but need to propagate somehow
-) -> "Optional[AbstractSpan]":
-    span = start_span(
-        OperationName.EXECUTE_TOOL,
-        server_address=None,
-        span_name=f"execute_tool {tool_name}",
-        thread_id=thread_id,
-        agent_id=agent_id,
-        run_id=run_id,
-        gen_ai_system=None,
-    )  # it's client code execution, not a GenAI span
-    if span is not None and span.span_instance.is_recording:
-        span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id)
-        span.add_attribute(GEN_AI_TOOL_NAME, tool_name)
-
-    return span
-
-
-def start_span(
-    operation_name: OperationName,
-    server_address: Optional[str],
-    span_name: Optional[str] = None,
-    thread_id: Optional[str] = None,
-    agent_id: Optional[str] = None,
-    run_id: Optional[str] = None,
-    model: Optional[str] = None,
-    temperature: Optional[float] = None,
-    top_p: Optional[float] = None,
-    max_prompt_tokens: Optional[int] = None,
-    max_completion_tokens: Optional[int] = None,
-    response_format: Optional[str] = None,
-    gen_ai_system: Optional[str] = AZ_AI_AGENT_SYSTEM,
-    kind: SpanKind = SpanKind.CLIENT,
-) -> "Optional[AbstractSpan]":
-    if _span_impl_type is None:
-        return None
-
-    span = _span_impl_type(name=span_name or operation_name.value, kind=kind)
-
-    if span and span.span_instance.is_recording:
-        if gen_ai_system:
-            span.add_attribute(GEN_AI_SYSTEM, AZ_AI_AGENT_SYSTEM)
-
-        span.add_attribute(GEN_AI_OPERATION_NAME, operation_name.value)
-
-        if server_address:
-            span.add_attribute(SERVER_ADDRESS, server_address)
-
-        if thread_id:
-            span.add_attribute(GEN_AI_THREAD_ID, thread_id)
-
-        if agent_id:
-            span.add_attribute(GEN_AI_AGENT_ID, agent_id)
-
-        if run_id:
-            span.add_attribute(GEN_AI_THREAD_RUN_ID, run_id)
-
-        if model:
-            span.add_attribute(GEN_AI_REQUEST_MODEL, model)
-
-        if temperature:
-            span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, str(temperature))
-
-        if top_p:
-            span.add_attribute(GEN_AI_REQUEST_TOP_P, str(top_p))
-
-        if max_prompt_tokens:
-            span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens)
-
-        if max_completion_tokens:
-            span.add_attribute(GEN_AI_REQUEST_MAX_OUTPUT_TOKENS, max_completion_tokens)
-
-        if response_format:
-            span.add_attribute(GEN_AI_REQUEST_RESPONSE_FORMAT, response_format)
-
-    return span
diff --git a/sdk/ai/azure-ai-projects/sdk_packaging.toml b/sdk/ai/azure-ai-projects/sdk_packaging.toml
new file mode 100644
index 000000000000..e7687fdae93b
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/sdk_packaging.toml
@@ -0,0 +1,2 @@
+[packaging]
+auto_update = false
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py
index 28b6a92413c5..0e77efb5eebe 100644
--- a/sdk/ai/azure-ai-projects/setup.py
+++ b/sdk/ai/azure-ai-projects/setup.py
@@ -1,4 +1,3 @@
-# pylint: disable=line-too-long,useless-suppression
 # coding=utf-8
 # --------------------------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,7 +5,7 @@
 # Code generated by Microsoft (R) Python Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
-# coding: utf-8
+

 import os
 import re
@@ -14,17 +13,7 @@

 PACKAGE_NAME = "azure-ai-projects"
-PACKAGE_PPRINT_NAME = "Azure AI Projects"
-
-PIPY_LONG_DESCRIPTION_BEGIN = ""
-PIPY_LONG_DESCRIPTION_END = ""
-LINKS_DIVIDER = ""
-
-GITHUB_URL = f"https://aka.ms/azsdk/azure-ai-projects/python/code"
-
-# Define the regular expression pattern to match links in the format [section name](#section_header)
-pattern = re.compile(r"\[([^\]]+)\]\(#([^\)]+)\)")
-
+PACKAGE_PPRINT_NAME = "Azure Ai Projects"

 # a-b-c => a/b/c
 package_folder_path = PACKAGE_NAME.replace("-", "/")
@@ -37,49 +26,27 @@
     raise RuntimeError("Cannot find version information")

-long_description = ""
-
-# When you click a link in the Table of Contents, which has the format {URL/#section_header}, you are supposed to be redirected to the section header.
-# However, this is not supported when the README is rendered on pypi.org. The README doesn't render with id={section_header} in HTML.
-# To resolve these broken links, we build the long description from the top of the README content, the Table of Contents, and the links at the bottom of the README,
-# and replace the links in the Table of Contents so they redirect to github.com.
-with open("README.md", "r") as f: - readme_content = f.read() - start_index = readme_content.find(PIPY_LONG_DESCRIPTION_BEGIN) + len(PIPY_LONG_DESCRIPTION_BEGIN) - end_index = readme_content.find(PIPY_LONG_DESCRIPTION_END) - long_description = readme_content[start_index:end_index].strip() - long_description = long_description.replace("{{package_name}}", PACKAGE_PPRINT_NAME) - long_description = re.sub(pattern, rf"[\1]({GITHUB_URL})", long_description) - links_index = readme_content.find(LINKS_DIVIDER) - long_description += "\n\n" + readme_content[links_index:].strip() - -with open("CHANGELOG.md", "r") as f: - long_description += "\n\n" + f.read() - setup( name=PACKAGE_NAME, version=version, - description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), - long_description=long_description, + description="Microsoft Corporation {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), long_description_content_type="text/markdown", license="MIT License", author="Microsoft Corporation", author_email="azpysdkhelp@microsoft.com", - url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects", - keywords="azure sdk, azure, ai, agents, foundry, inference, chat completion, project, evaluation", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", classifiers=[ "Development Status :: 4 - Beta", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", "License :: OSI Approved :: MIT License", - "Topic :: Scientific/Engineering :: Artificial Intelligence", ], zip_safe=False, packages=find_packages( @@ -97,10 +64,7 @@ install_requires=[ "isodate>=0.6.1", "azure-core>=1.30.0", - "typing-extensions>=4.12.2", + "typing-extensions>=4.6.0", ], - python_requires=">=3.8", - extras_require={ - "prompts": ["prompty"], - }, + python_requires=">=3.9", ) diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 95a2db669486..f50b78517b5e 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 66f3f5a1184215abf25d93f185b55dfbc75b0050 +commit: d5463f01235f1f0f939738ecfd706026e084e8f3 repo: Azure/azure-rest-api-specs additionalDirectories: