From 3178b9c9490584b12a85ca1f5ed1959da681a3bd Mon Sep 17 00:00:00 2001
From: azure-sdk
Date: Thu, 6 Nov 2025 00:14:56 +0000
Subject: [PATCH 01/10] Configurations: 'specification/cognitiveservices/data-plane/LanguageAnalyzeConversations/tspconfig.yaml', API Version: 2025-11-15-preview, SDK Release Type: beta, and CommitSHA: '5b0855a6739c8dbd68136d364d7fc5ed1aa0c43f' in SpecRepo: 'https://github.com/Azure/azure-rest-api-specs'

Pipeline run: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=5539661

Refer to https://eng.ms/docs/products/azure-developer-experience/develop/sdk-release/sdk-release-prerequisites to prepare for SDK release.
---
 .../_metadata.json | 6 +-
 .../apiview-properties.json | 12 +-
 .../ai/language/conversations/__init__.py | 4 +-
 .../ai/language/conversations/_client.py | 10 +-
 .../language/conversations/_configuration.py | 8 +-
 .../conversations/_operations/__init__.py | 2 +-
 .../conversations/_operations/_operations.py | 50 +-
 .../conversations/_operations/_patch.py | 5 +-
 .../azure/ai/language/conversations/_patch.py | 300 +----
 .../conversations/_utils/model_base.py | 60 ++-
 .../conversations/_utils/serialization.py | 26 +-
 .../ai/language/conversations/_version.py | 2 +-
 .../ai/language/conversations/aio/__init__.py | 4 +-
 .../ai/language/conversations/aio/_client.py | 10 +-
 .../conversations/aio/_configuration.py | 8 +-
 .../conversations/aio/_operations/__init__.py | 2 +-
 .../aio/_operations/_operations.py | 42 +-
 .../conversations/aio/_operations/_patch.py | 4 +-
 .../ai/language/conversations/aio/_patch.py | 305 +----
 .../language/conversations/models/__init__.py | 8 +
 .../language/conversations/models/_enums.py | 84 +++
 .../language/conversations/models/_models.py | 492 ++++++++++++------
 .../language/conversations/models/_patch.py | 31 +-
 ...ccessful_analyze_conversational_ai_task.py | 68 +++
 .../successful_analyze_conversations.py | 43 ++
 ...ssful_analyze_conversations_arbitration.py | 51 ++
 ...conversations_arbitration_direct_target.py | 51 ++
 ...alyze_conversations_jobs_cancel_request.py | 32 ++
 .../generated_tests/conftest.py | 45 ++
 .../test_conversation_analysis.py | 56 ++
 .../test_conversation_analysis_async.py | 59 +++
 .../generated_tests/testpreparer.py | 28 +
 .../generated_tests/testpreparer_async.py | 20 +
 .../pyproject.toml | 44 +-
 ...onversation_multi_turn_prediction_async.py | 71 +--
 .../async/sample_conversation_pii_async.py | 3 +
 ...on_pii_with_character_mask_policy_async.py | 4 +
 ...ation_pii_with_entity_mask_policy_async.py | 6 +-
 ...versation_pii_with_no_mask_policy_async.py | 16 +-
 .../sample_conversation_prediction_async.py | 2 +
 ...ersation_prediction_with_language_async.py | 2 +
 ...versation_prediction_with_options_async.py | 2 +
 ...sample_conversation_summarization_async.py | 2 +
 .../sample_orchestration_prediction_async.py | 2 +
 .../samples/sample_authentication.py | 1 +
 ...mple_conversation_multi_turn_prediction.py | 5 +-
 .../samples/sample_conversation_pii.py | 2 +
 ...ersation_pii_with_character_mask_policy.py | 2 +
 ...onversation_pii_with_entity_mask_policy.py | 2 +
 ...le_conversation_pii_with_no_mask_policy.py | 6 +-
 .../samples/sample_conversation_prediction.py | 2 +
 ...e_conversation_prediction_with_language.py | 2 +
 ...le_conversation_prediction_with_options.py | 2 +
 .../sample_conversation_summarization.py | 2 +
 .../sample_orchestration_prediction.py | 2 +
 .../sdk_packaging.toml | 2 -
 .../tsp-location.yaml | 4 +-
 57 files changed, 1153 insertions(+), 963 deletions(-)
 create mode 100644
sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json b/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json index 253921f335be..48f671f46df4 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json @@ -1,3 +1,7 @@ { - "apiVersion": "2025-05-15-preview" + "apiVersion": "2025-11-15-preview", + "commit": "5b0855a6739c8dbd68136d364d7fc5ed1aa0c43f", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/cognitiveservices/data-plane/LanguageAnalyzeConversations", + "emitterVersion": "0.53.1" } \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json b/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json index abc75142da86..769cab9f7a56 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json @@ -65,6 +65,10 @@ "azure.ai.language.conversations.models.KnowledgeBaseAnswerPrompt": "Language.Conversations.KnowledgeBaseAnswerPrompt", "azure.ai.language.conversations.models.LengthResolution": "Language.Conversations.LengthResolution", "azure.ai.language.conversations.models.ListKey": "Language.Conversations.ListKey", + "azure.ai.language.conversations.models.LuisCallingConfig": "Language.Conversations.LuisCallingOptions", + "azure.ai.language.conversations.models.LuisConfig": "Language.Conversations.LuisParameters", + "azure.ai.language.conversations.models.LuisResult": "Language.Conversations.LuisResult", + "azure.ai.language.conversations.models.LuisTargetIntentResult": "Language.Conversations.LuisTargetIntentResult", "azure.ai.language.conversations.models.MetadataFilter": "Language.Conversations.MetadataFilter", "azure.ai.language.conversations.models.MetadataRecord": "Language.Conversations.MetadataRecord", "azure.ai.language.conversations.models.NamedEntity": "Language.Conversations.Entity", @@ -122,9 +126,9 @@ "azure.ai.language.conversations.models.AnalyzeConversationOperationResultsKind": 
"Language.Conversations.AnalyzeConversationResultsKind", "azure.ai.language.conversations.models.SummaryLengthBucket": "Language.Conversations.SummaryLengthBucket", "azure.ai.language.conversations.models.SummaryAspect": "Language.Conversations.SummaryAspect", - "azure.ai.language.conversations.ConversationAnalysisClient.analyze_conversation": "Language.Conversations.analyzeConversations", - "azure.ai.language.conversations.aio.ConversationAnalysisClient.analyze_conversation": "Language.Conversations.analyzeConversations", - "azure.ai.language.conversations.ConversationAnalysisClient.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob", - "azure.ai.language.conversations.aio.ConversationAnalysisClient.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob" + "azure.ai.language.conversations.ConversationAnalysis.analyze_conversation": "Language.Conversations.analyzeConversations", + "azure.ai.language.conversations.aio.ConversationAnalysis.analyze_conversation": "Language.Conversations.analyzeConversations", + "azure.ai.language.conversations.ConversationAnalysis.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob", + "azure.ai.language.conversations.aio.ConversationAnalysis.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob" } } \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py index 93ba412cb15c..dc13f060daf4 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._client import ConversationAnalysisClient # type: ignore +from ._client import ConversationAnalysis # type: ignore from ._version import VERSION __version__ = VERSION @@ -25,7 +25,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ConversationAnalysisClient", + "ConversationAnalysis", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py index b779cac5bf09..c337d532768f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py @@ -15,15 +15,15 @@ from azure.core.pipeline import policies from azure.core.rest import HttpRequest, HttpResponse -from ._configuration import ConversationAnalysisClientConfiguration -from ._operations import _ConversationAnalysisClientOperationsMixin +from ._configuration import ConversationAnalysisConfiguration +from ._operations import _ConversationAnalysisOperationsMixin from ._utils.serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential -class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): +class ConversationAnalysis(_ConversationAnalysisOperationsMixin): """The language service conversations API is a suite of natural language processing (NLP) skills that can be used to analyze structured 
conversations (textual or spoken). The synchronous API in this suite accepts a request and mediates among multiple language projects, such as LUIS @@ -41,7 +41,7 @@ class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported + "2025-11-15-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no @@ -50,7 +50,7 @@ class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: _endpoint = "{Endpoint}/language" - self._config = ConversationAnalysisClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + self._config = ConversationAnalysisConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py index c5a5e7cab556..0191d8cc47f5 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py @@ -17,8 +17,8 @@ from azure.core.credentials import TokenCredential -class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for ConversationAnalysisClient. +class ConversationAnalysisConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for ConversationAnalysis. Note that all parameters used to create this instance are saved as instance attributes. @@ -31,13 +31,13 @@ class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-insta :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported + "2025-11-15-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + api_version: str = kwargs.pop("api_version", "2025-11-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py index 90c37e0a0e5b..4f55a6b05b89 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import _ConversationAnalysisClientOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _ConversationAnalysisOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py index 74294e9734aa..29daeb3f6faf 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py @@ -5,10 +5,10 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping # pylint:disable=import-error +from collections.abc import MutableMapping from io import IOBase import json -from typing import Any, Callable, Dict, IO, Iterator, Optional, TypeVar, Union, cast, overload +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload from azure.core import PipelineClient from azure.core.exceptions import ( @@ -29,7 +29,7 @@ from azure.core.utils import case_insensitive_dict from .. 
import models as _models -from .._configuration import ConversationAnalysisClientConfiguration +from .._configuration import ConversationAnalysisConfiguration from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from .._utils.serialization import Serializer from .._utils.utils import ClientMixinABC @@ -37,7 +37,7 @@ JSON = MutableMapping[str, Any] T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -50,7 +50,7 @@ def build_conversation_analysis_analyze_conversation_request( # pylint: disable _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -73,7 +73,7 @@ def build_conversation_analysis_get_job_status_request( # pylint: disable=name- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -102,7 +102,7 @@ def build_conversation_analysis_analyze_conversation_job_request( # pylint: dis _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-15-preview")) # Construct URL _url = "/analyze-conversations/jobs" @@ -121,7 +121,7 @@ def build_conversation_analysis_cancel_job_request( # pylint: disable=name-too- ) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-15-preview")) # Construct URL _url = "/analyze-conversations/jobs/{jobId}:cancel" path_format_arguments = { @@ -136,8 +136,8 @@ def build_conversation_analysis_cancel_job_request( # pylint: disable=name-too- return HttpRequest(method="POST", url=_url, params=_params, **kwargs) -class _ConversationAnalysisClientOperationsMixin( - ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], ConversationAnalysisClientConfiguration] +class _ConversationAnalysisOperationsMixin( + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], ConversationAnalysisConfiguration] ): @overload @@ -253,7 +253,10 @@ def analyze_conversation( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) if _stream: @@ -270,7 +273,7 @@ def 
analyze_conversation( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "job_id", "show_stats", "accept"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) def _get_job_status( self, job_id: str, *, show_stats: Optional[bool] = None, **kwargs: Any @@ -329,7 +332,10 @@ def _get_job_status( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) if _stream: @@ -345,7 +351,7 @@ def _get_job_status( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "content_type"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) def _analyze_conversation_job_initial( self, body: Union[_models._models.AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any @@ -396,7 +402,10 @@ def _analyze_conversation_job_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -430,7 +439,7 @@ def _begin_analyze_conversation_job( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "content_type"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) def _begin_analyze_conversation_job( self, body: Union[_models._models.AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any @@ -488,7 +497,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "job_id"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> Iterator[bytes]: error_map: MutableMapping = { @@ -528,7 +537,10 @@ def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> Iterator[bytes]: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -545,7 +557,7 @@ def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> Iterator[bytes]: @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "job_id"]}, - 
api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) def begin_cancel_job(self, job_id: str, **kwargs: Any) -> LROPoller[None]: """Cancel a long-running Text Analysis conversations job. diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_patch.py index 79f8c962e189..87676c65a8f0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_patch.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -8,9 +7,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py index 659468b51808..87676c65a8f0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -8,303 +7,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import json -from typing import Any, Callable, Dict, IO, Mapping, Optional, TypeVar, Union, cast, overload, Generic, TYPE_CHECKING -from collections.abc import MutableMapping # pylint:disable=import-error -from urllib.parse import urlparse -from azure.core.exceptions import ( - HttpResponseError, -) -from azure.core.pipeline import PipelineResponse -from azure.core.polling import LROPoller, NoPolling, PollingMethod -from azure.core.polling.base_polling import LROBasePolling -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict -from azure.core.credentials import AzureKeyCredential -from azure.core.paging import ItemPaged -from ._client import ConversationAnalysisClient as AnalysisClientGenerated -from .models import AnalyzeConversationOperationInput, AnalyzeConversationOperationState, ConversationActions -from ._utils.serialization import Serializer -from ._validation import api_version_validation -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - -JSON = MutableMapping[str, Any] -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def _parse_operation_id(op_loc: Optional[str]) -> Optional[str]: - """Extract the operation ID from an Operation-Location URL. - - :param op_loc: The ``Operation-Location`` header value or URL to parse. - If ``None`` or malformed, no ID can be extracted. - :type op_loc: Optional[str] - :return: The trailing path segment as the operation ID, or ``None`` if not found. - :rtype: Optional[str] - """ - if not op_loc: - return None - path = urlparse(op_loc).path.rstrip("/") - if "/" not in path: - return None - return path.rsplit("/", 1)[-1] - - -PollingReturnType_co = TypeVar("PollingReturnType_co", covariant=True) - - -class AnalyzeConversationLROPoller(LROPoller[PollingReturnType_co], Generic[PollingReturnType_co]): - """Custom poller that returns PollingReturnType_co and exposes operation metadata.""" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - # populated by the deserialization callback in your begin_* method - self._last_state: Optional["AnalyzeConversationOperationState"] = None - - def _record_state_for_details(self, state: "AnalyzeConversationOperationState") -> None: - """Internal: update the state used by ``.details``. - - :param state: The latest operation state to expose via ``details``. - :type state: AnalyzeConversationOperationState - :return: None - :rtype: None - """ - self._last_state = state - - @property - def details(self) -> Mapping[str, Any]: - """Metadata associated with the long-running operation. - - :return: A mapping with keys like ``operation_id`` and, when available, - ``status``, ``job_id``, ``display_name``, ``created_date_time``, - ``last_updated_date_time``, ``expiration_date_time``, ``statistics``, - ``errors``, and ``next_link``. 
- :rtype: Mapping[str, Any] - """ - try: - headers = getattr(self.polling_method(), "_initial_response").http_response.headers # type: ignore[attr-defined] - op_loc = headers.get("Operation-Location") or headers.get("operation-location") - except (AttributeError, TypeError): - # missing attributes in the chain, or headers is not a mapping - op_loc = None - - op_id = _parse_operation_id(op_loc) - info: Dict[str, Any] = {"operation_id": op_id} - - # Merge fields from the final state (if available) - if self._last_state is not None: - s = self._last_state - info.update( - { - "status": s.status, - "job_id": s.job_id, - "display_name": s.display_name, - "created_date_time": s.created_date_time, - "last_updated_date_time": s.last_updated_date_time, - "expiration_date_time": s.expiration_date_time, - "statistics": s.statistics, - "errors": s.errors, - "next_link": s.next_link, - } - ) - return info - - @classmethod - def from_continuation_token( - cls, - polling_method: PollingMethod[PollingReturnType_co], - continuation_token: str, - **kwargs: Any, - ) -> "AnalyzeConversationLROPoller[PollingReturnType_co]": - client, initial_response, deserialization_callback = polling_method.from_continuation_token( - continuation_token, **kwargs - ) - return cls(client, initial_response, deserialization_callback, polling_method) - - -class ConversationAnalysisClient(AnalysisClientGenerated): - - def __init__( - self, - endpoint: str, - credential: Union[AzureKeyCredential, "TokenCredential"], - *, - api_version: Optional[str] = None, - **kwargs: Any, - ) -> None: - """Create a ConversationAnalysisClient. - :param endpoint: Supported Cognitive Services endpoint. - :type endpoint: str - :param credential: Key or token credential. - :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str` - """ - if api_version is not None: - kwargs["api_version"] = api_version - super().__init__(endpoint=endpoint, credential=credential, **kwargs) - - @overload - def begin_analyze_conversation_job( - self, body: AnalyzeConversationOperationInput, *, content_type: str = "application/json", **kwargs: Any - ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ - ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def begin_analyze_conversation_job( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ - ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def begin_analyze_conversation_job( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ - ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - @api_version_validation( - method_added_on="2023-04-01", - params_added_on={"2023-04-01": ["api_version", "content_type", "accept"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], - ) - def begin_analyze_conversation_job( # type: ignore[override] - self, body: Union[AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any - ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput or JSON or IO[bytes] - :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. 
- :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ - ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - polling: Union[bool, PollingMethod[ItemPaged["ConversationActions"]]] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - cls = kwargs.pop("cls", None) # optional custom deserializer - kwargs.pop("error_map", None) - - path_format_arguments = { - "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - - def _fetch_state_by_next_link(next_link: str) -> AnalyzeConversationOperationState: - req = HttpRequest("GET", next_link) - resp = self._client.send_request(req) # type: ignore[attr-defined] - if resp.status_code != 200: - raise HttpResponseError(response=resp) - data = json.loads(resp.text()) - return AnalyzeConversationOperationState(data) - - def _build_pager_from_state(state: AnalyzeConversationOperationState) -> ItemPaged["ConversationActions"]: - def extract_data(s: AnalyzeConversationOperationState): - next_link = s.next_link - actions: ConversationActions = s.actions - return next_link, [actions] - - def get_next(token: Optional[str]) -> Optional[AnalyzeConversationOperationState]: - if token is None: - return state - if not token: - return None - return _fetch_state_by_next_link(token) - - return ItemPaged(get_next, extract_data) - - # ----- end paging helpers - - # filled after creating the poller; used inside the deserializer - poller_holder: Dict[str, AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]] = {} - - def get_long_running_output(pipeline_response): - final_response = pipeline_response.http_response - if final_response.status_code == 200: - data = json.loads(final_response.text()) - op_state = AnalyzeConversationOperationState(data) - - # stash state on the custom poller for `.details` - poller_ref = poller_holder["poller"] - poller_ref._record_state_for_details(op_state) # pylint:disable=protected-access - - paged = _build_pager_from_state(op_state) - return cls(pipeline_response, paged, {}) if cls else paged - raise HttpResponseError(response=final_response) - - # ----- polling method selection - if polling is True: - polling_method: PollingMethod[ItemPaged["ConversationActions"]] = cast( - PollingMethod[ItemPaged["ConversationActions"]], - LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(PollingMethod[ItemPaged["ConversationActions"]], NoPolling()) - else: - polling_method = cast(PollingMethod[ItemPaged["ConversationActions"]], polling) - - if cont_token: - return AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - ) - - # Submit the job - raw_result = self._analyze_conversation_job_initial( - body=body, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs - ) - - lro: AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]] = AnalyzeConversationLROPoller( - self._client, raw_result, get_long_running_output, polling_method - ) - 
poller_holder["poller"] = lro - return lro +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -314,6 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -__all__ = ["ConversationAnalysisClient", "AnalyzeConversationLROPoller"] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py index 2acbba293ae8..12926fa98dcf 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py @@ -22,7 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET -from collections.abc import MutableMapping # pylint:disable=import-error +from collections.abc import MutableMapping from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -346,7 +346,7 @@ def _get_model(module_name: str, model_name: str): class _MyMutableMapping(MutableMapping[str, typing.Any]): - def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + def __init__(self, data: dict[str, typing.Any]) -> None: self._data = data def __contains__(self, key: typing.Any) -> bool: @@ -426,7 +426,7 @@ def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: return self._data.pop(key) return self._data.pop(key, default) - def popitem(self) -> typing.Tuple[str, typing.Any]: + def popitem(self) -> tuple[str, typing.Any]: """ Removes and returns some (key, value) pair :returns: The (key, value) pair. 
@@ -514,9 +514,7 @@ def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-m return o -def _get_rest_field( - attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str -) -> typing.Optional["_RestField"]: +def _get_rest_field(attr_to_rest_field: dict[str, "_RestField"], rest_name: str) -> typing.Optional["_RestField"]: try: return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) except StopIteration: @@ -539,7 +537,7 @@ class Model(_MyMutableMapping): _is_model = True # label whether current class's _attr_to_rest_field has been calculated # could not see _attr_to_rest_field directly because subclass inherits it from parent class - _calculated: typing.Set[str] = set() + _calculated: set[str] = set() def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: class_name = self.__class__.__name__ @@ -624,7 +622,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order - attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + attr_to_rest_field: dict[str, _RestField] = { # map attribute name to rest_field property k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") } annotations = { @@ -639,10 +637,10 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) if not rf._rest_name_input: rf._rest_name_input = attr - cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) # pylint:disable=no-value-for-parameter + return super().__new__(cls) def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -681,7 +679,7 @@ def _deserialize(cls, data, exist_discriminators): mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member return mapped_cls._deserialize(data, exist_discriminators) - def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + def as_dict(self, *, exclude_readonly: bool = False) -> dict[str, typing.Any]: """Return a dict that can be turned into json using json.dump. :keyword bool exclude_readonly: Whether to remove the readonly properties. 
@@ -741,7 +739,7 @@ def _deserialize_with_union(deserializers, obj): def _deserialize_dict( value_deserializer: typing.Optional[typing.Callable], module: typing.Optional[str], - obj: typing.Dict[typing.Any, typing.Any], + obj: dict[typing.Any, typing.Any], ): if obj is None: return obj @@ -751,7 +749,7 @@ def _deserialize_dict( def _deserialize_multiple_sequence( - entry_deserializers: typing.List[typing.Optional[typing.Callable]], + entry_deserializers: list[typing.Optional[typing.Callable]], module: typing.Optional[str], obj, ): @@ -772,14 +770,14 @@ def _deserialize_sequence( return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) -def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: +def _sorted_annotations(types: list[typing.Any]) -> list[typing.Any]: return sorted( types, key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), ) -def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-statements, too-many-branches annotation: typing.Any, module: typing.Optional[str], rf: typing.Optional["_RestField"] = None, @@ -844,7 +842,10 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur return functools.partial(_deserialize_with_union, deserializers) try: - if annotation._name == "Dict": # pyright: ignore + annotation_name = ( + annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore + ) + if annotation_name.lower() == "dict": value_deserializer = _get_deserialize_callable_from_annotation( annotation.__args__[1], module, rf # pyright: ignore ) @@ -857,7 +858,10 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur except (AttributeError, IndexError): pass try: - if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + annotation_name = ( + annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore + ) + if annotation_name.lower() in ["list", "set", "tuple", "sequence"]: if len(annotation.__args__) > 1: # pyright: ignore entry_deserializers = [ _get_deserialize_callable_from_annotation(dt, module, rf) @@ -975,11 +979,11 @@ def __init__( name: typing.Optional[str] = None, type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin is_discriminator: bool = False, - visibility: typing.Optional[typing.List[str]] = None, + visibility: typing.Optional[list[str]] = None, default: typing.Any = _UNSET, format: typing.Optional[str] = None, is_multipart_file_input: bool = False, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + xml: typing.Optional[dict[str, typing.Any]] = None, ): self._type = type self._rest_name_input = name @@ -1037,11 +1041,11 @@ def rest_field( *, name: typing.Optional[str] = None, type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - visibility: typing.Optional[typing.List[str]] = None, + visibility: typing.Optional[list[str]] = None, default: typing.Any = _UNSET, format: typing.Optional[str] = None, is_multipart_file_input: bool = False, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + xml: typing.Optional[dict[str, typing.Any]] = None, ) -> typing.Any: return _RestField( name=name, @@ -1058,8 +1062,8 @@ def rest_discriminator( *, name: typing.Optional[str] = None, type: 
typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - visibility: typing.Optional[typing.List[str]] = None, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + visibility: typing.Optional[list[str]] = None, + xml: typing.Optional[dict[str, typing.Any]] = None, ) -> typing.Any: return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) @@ -1078,9 +1082,9 @@ def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: def _get_element( o: typing.Any, exclude_readonly: bool = False, - parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + parent_meta: typing.Optional[dict[str, typing.Any]] = None, wrapped_element: typing.Optional[ET.Element] = None, -) -> typing.Union[ET.Element, typing.List[ET.Element]]: +) -> typing.Union[ET.Element, list[ET.Element]]: if _is_model(o): model_meta = getattr(o, "_xml", {}) @@ -1169,7 +1173,7 @@ def _get_element( def _get_wrapped_element( v: typing.Any, exclude_readonly: bool, - meta: typing.Optional[typing.Dict[str, typing.Any]], + meta: typing.Optional[dict[str, typing.Any]], ) -> ET.Element: wrapped_element = _create_xml_element( meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None @@ -1212,7 +1216,7 @@ def _deserialize_xml( def _convert_element(e: ET.Element): # dict case if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: - dict_result: typing.Dict[str, typing.Any] = {} + dict_result: dict[str, typing.Any] = {} for child in e: if dict_result.get(child.tag) is not None: if isinstance(dict_result[child.tag], list): @@ -1225,7 +1229,7 @@ def _convert_element(e: ET.Element): return dict_result # array case if len(e) > 0: - array_result: typing.List[typing.Any] = [] + array_result: list[typing.Any] = [] for child in e: array_result.append(_convert_element(child)) return array_result diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/serialization.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/serialization.py index eb86ea23c965..45a3e44e45cb 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/serialization.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/serialization.py @@ -21,7 +21,6 @@ import sys import codecs from typing import ( - Dict, Any, cast, Optional, @@ -31,7 +30,6 @@ Mapping, Callable, MutableMapping, - List, ) try: @@ -229,12 +227,12 @@ class Model: serialization and deserialization. 
""" - _subtype_map: Dict[str, Dict[str, Any]] = {} - _attribute_map: Dict[str, Dict[str, Any]] = {} - _validation: Dict[str, Dict[str, Any]] = {} + _subtype_map: dict[str, dict[str, Any]] = {} + _attribute_map: dict[str, dict[str, Any]] = {} + _validation: dict[str, dict[str, Any]] = {} def __init__(self, **kwargs: Any) -> None: - self.additional_properties: Optional[Dict[str, Any]] = {} + self.additional_properties: Optional[dict[str, Any]] = {} for k in kwargs: # pylint: disable=consider-using-dict-items if k not in self._attribute_map: _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) @@ -311,7 +309,7 @@ def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: def as_dict( self, keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer, **kwargs: Any ) -> JSON: """Return a dict that can be serialized using json.dump. @@ -380,7 +378,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: def from_dict( cls, data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None, content_type: Optional[str] = None, ) -> Self: """Parse a dict using given key extractor return a model. @@ -414,7 +412,7 @@ def _flatten_subtype(cls, key, objects): return {} result = dict(cls._subtype_map[key]) for valuetype in cls._subtype_map[key].values(): - result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access + result |= objects[valuetype]._flatten_subtype(key, objects) # pylint: disable=protected-access return result @classmethod @@ -528,7 +526,7 @@ def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: "[]": self.serialize_iter, "{}": self.serialize_dict, } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.dependencies: dict[str, type] = dict(classes) if classes else {} self.key_transformer = full_restapi_key_transformer self.client_side_validation = True @@ -579,7 +577,7 @@ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, to if attr_name == "additional_properties" and attr_desc["key"] == "": if target_obj.additional_properties is not None: - serialized.update(target_obj.additional_properties) + serialized |= target_obj.additional_properties continue try: @@ -789,7 +787,7 @@ def serialize_data(self, data, data_type, **kwargs): # If dependencies is empty, try with current data class # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) if issubclass(enum_type, Enum): return Serializer.serialize_enum(data, enum_obj=enum_type) @@ -1184,7 +1182,7 @@ def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argumen while "." 
in key: # Need the cast, as for some reasons "split" is typed as list[str | Any] - dict_keys = cast(List[str], _FLATTEN.split(key)) + dict_keys = cast(list[str], _FLATTEN.split(key)) if len(dict_keys) == 1: key = _decode_attribute_map_key(dict_keys[0]) break @@ -1386,7 +1384,7 @@ def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: "duration": (isodate.Duration, datetime.timedelta), "iso-8601": (datetime.datetime), } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.dependencies: dict[str, type] = dict(classes) if classes else {} self.key_extractors = [rest_key_extractor, xml_key_extractor] # Additional properties only works if the "rest_key_extractor" is used to # extract the keys. Making it to work whatever the key extractor is too much diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py index 0e00a6283246..be71c81bd282 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "2.0.0b1" +VERSION = "1.0.0b1" diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py index 0276ad109495..732438501d11 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._client import ConversationAnalysisClient # type: ignore +from ._client import ConversationAnalysis # type: ignore try: from ._patch import __all__ as _patch_all @@ -22,7 +22,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ConversationAnalysisClient", + "ConversationAnalysis", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py index 5c7c824ce589..8b5407baa17c 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py @@ -16,14 +16,14 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._utils.serialization import Deserializer, Serializer -from ._configuration import ConversationAnalysisClientConfiguration -from ._operations import _ConversationAnalysisClientOperationsMixin +from ._configuration import ConversationAnalysisConfiguration +from ._operations import _ConversationAnalysisOperationsMixin if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential -class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): +class ConversationAnalysis(_ConversationAnalysisOperationsMixin): """The language 
service conversations API is a suite of natural language processing (NLP) skills that can be used to analyze structured conversations (textual or spoken). The synchronous API in this suite accepts a request and mediates among multiple language projects, such as LUIS @@ -41,7 +41,7 @@ class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported + "2025-11-15-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no @@ -52,7 +52,7 @@ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: _endpoint = "{Endpoint}/language" - self._config = ConversationAnalysisClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + self._config = ConversationAnalysisConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py index 7845581c1421..cea38d85d71e 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py @@ -17,8 +17,8 @@ from azure.core.credentials_async import AsyncTokenCredential -class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for ConversationAnalysisClient. +class ConversationAnalysisConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for ConversationAnalysis. Note that all parameters used to create this instance are saved as instance attributes. @@ -31,7 +31,7 @@ class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-insta :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported + "2025-11-15-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -39,7 +39,7 @@ class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-insta def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + api_version: str = kwargs.pop("api_version", "2025-11-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py index 90c37e0a0e5b..4f55a6b05b89 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import _ConversationAnalysisClientOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _ConversationAnalysisOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py index 246b19d00470..d2456af5ae41 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py @@ -6,10 +6,10 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from collections.abc import MutableMapping # pylint:disable=import-error +from collections.abc import MutableMapping from io import IOBase import json -from typing import Any, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload from azure.core import AsyncPipelineClient from azure.core.exceptions import ( @@ -39,15 +39,15 @@ from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._utils.utils import ClientMixinABC from ..._validation import api_version_validation -from .._configuration import ConversationAnalysisClientConfiguration +from .._configuration import ConversationAnalysisConfiguration JSON = MutableMapping[str, Any] T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] -class _ConversationAnalysisClientOperationsMixin( - ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], ConversationAnalysisClientConfiguration] +class _ConversationAnalysisOperationsMixin( + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], ConversationAnalysisConfiguration] ): @overload @@ -163,7 +163,10 @@ async def analyze_conversation( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) if _stream: @@ -180,7 +183,7 @@ async def analyze_conversation( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "job_id", "show_stats", "accept"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) async def _get_job_status( self, job_id: str, *, show_stats: Optional[bool] = None, **kwargs: Any @@ -239,7 +242,10 @@ async def _get_job_status( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) if _stream: @@ -255,7 +261,7 @@ async def _get_job_status( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "content_type"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) async def _analyze_conversation_job_initial( self, body: Union[_models._models.AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any @@ -306,7 +312,10 @@ async def _analyze_conversation_job_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + 
_models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -340,7 +349,7 @@ async def _begin_analyze_conversation_job( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "content_type"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) async def _begin_analyze_conversation_job( self, body: Union[_models._models.AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any @@ -399,7 +408,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "job_id"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) async def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> AsyncIterator[bytes]: error_map: MutableMapping = { @@ -439,7 +448,10 @@ async def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> AsyncIterator except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -456,7 +468,7 @@ async def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> AsyncIterator @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "job_id"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) async def begin_cancel_job(self, job_id: str, **kwargs: Any) -> AsyncLROPoller[None]: """Cancel a long-running Text Analysis conversations job. 
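For orientation, a minimal usage sketch of the renamed async client surfaced by the hunks above. The module path, class name, default API version, and begin_cancel_job signature come from this diff; the endpoint, key, and job id values are placeholders, and the async-context-manager usage is assumed from the standard generated client shape rather than shown here.

import asyncio

from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysis


async def cancel_analysis_job() -> None:
    client = ConversationAnalysis(
        endpoint="https://<resource-name>.cognitiveservices.azure.com",  # placeholder
        credential=AzureKeyCredential("<api-key>"),                      # placeholder
        api_version="2025-11-15-preview",  # matches the new default shown above
    )
    async with client:
        # begin_cancel_job returns an AsyncLROPoller[None]; result() waits for the
        # cancellation to settle.
        poller = await client.begin_cancel_job("<job-id>")               # placeholder id
        await poller.result()


asyncio.run(cancel_analysis_job())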
diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_patch.py index 8bcb627aa475..87676c65a8f0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_patch.py @@ -7,9 +7,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py index 18aec5d188e6..87676c65a8f0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -8,308 +7,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import json -from typing import Any, Callable, Dict, IO, Mapping, Optional, TypeVar, Union, cast, overload, Generic, TYPE_CHECKING -from collections.abc import MutableMapping # pylint:disable=import-error -from urllib.parse import urlparse -from azure.core.exceptions import ( - HttpResponseError, -) -from azure.core.pipeline import PipelineResponse -from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod -from azure.core.polling.async_base_polling import AsyncLROBasePolling -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict -from azure.core.credentials import AzureKeyCredential -from azure.core.async_paging import AsyncItemPaged -from ._client import ConversationAnalysisClient as AnalysisClientGenerated -from ..models import AnalyzeConversationOperationInput, AnalyzeConversationOperationState, ConversationActions -from .._validation import api_version_validation -if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential - -JSON = MutableMapping[str, Any] -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -def _parse_operation_id(op_loc: Optional[str]) -> Optional[str]: - """Extract the operation ID from an Operation-Location URL. - - :param op_loc: The ``Operation-Location`` header value or URL to parse. - If ``None`` or malformed, no ID can be extracted. - :type op_loc: Optional[str] - :return: The trailing path segment as the operation ID, or ``None`` if not found. 
- :rtype: Optional[str] - """ - if not op_loc: - return None - path = urlparse(op_loc).path.rstrip("/") - if "/" not in path: - return None - return path.rsplit("/", 1)[-1] - - -PollingReturnType_co = TypeVar("PollingReturnType_co", covariant=True) - - -class AnalyzeConversationAsyncLROPoller(AsyncLROPoller[PollingReturnType_co], Generic[PollingReturnType_co]): - """Async poller that returns PollingReturnType_co and exposes operation metadata.""" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - # populated by your deserialization callback in begin_*_async - self._last_state: Optional["AnalyzeConversationOperationState"] = None - - def _record_state_for_details(self, state: "AnalyzeConversationOperationState") -> None: - """Internal: update the state used by ``.details``. - - :param state: The latest operation state to expose via ``details``. - :type state: AnalyzeConversationOperationState - :return: None - :rtype: None - """ - self._last_state = state - - @property - def details(self) -> Mapping[str, Any]: - """Metadata associated with the long-running operation. - - :return: A mapping with keys like ``operation_id`` and, when available, - ``status``, ``job_id``, ``display_name``, ``created_date_time``, - ``last_updated_date_time``, ``expiration_date_time``, ``statistics``, - ``errors``, and ``next_link``. - :rtype: Mapping[str, Any] - """ - try: - headers = getattr(self.polling_method(), "_initial_response").http_response.headers # type: ignore[attr-defined] - op_loc = headers.get("Operation-Location") or headers.get("operation-location") - except (AttributeError, TypeError): - # missing attributes in the chain, or headers is not a mapping - op_loc = None - - info: Dict[str, Any] = {"operation_id": _parse_operation_id(op_loc)} - - # Enrich from final state if available - if self._last_state is not None: - s = self._last_state - info.update( - { - "status": s.status, - "job_id": s.job_id, - "display_name": s.display_name, - "created_date_time": s.created_date_time, - "last_updated_date_time": s.last_updated_date_time, - "expiration_date_time": s.expiration_date_time, - "statistics": s.statistics, - "errors": s.errors, - "next_link": s.next_link, - } - ) - return info - - @classmethod - def from_continuation_token( - cls, - polling_method: AsyncPollingMethod[PollingReturnType_co], - continuation_token: str, - **kwargs: Any, - ) -> "AnalyzeConversationAsyncLROPoller[PollingReturnType_co]": - client, initial_response, deserialization_callback = polling_method.from_continuation_token( - continuation_token, **kwargs - ) - return cls(client, initial_response, deserialization_callback, polling_method) - - -class ConversationAnalysisClient(AnalysisClientGenerated): - - def __init__( - self, - endpoint: str, - credential: Union[AzureKeyCredential, "AsyncTokenCredential"], - *, - api_version: Optional[str] = None, - **kwargs: Any, - ) -> None: - """Create a ConversationAnalysisClient. - :param endpoint: Supported Cognitive Services endpoint. - :type endpoint: str - :param credential: Key or token credential. - :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str` - """ - if api_version is not None: - kwargs["api_version"] = api_version - super().__init__(endpoint=endpoint, credential=credential, **kwargs) - - @overload - async def begin_analyze_conversation_job( - self, body: AnalyzeConversationOperationInput, *, content_type: str = "application/json", **kwargs: Any - ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ - ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def begin_analyze_conversation_job( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ - ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def begin_analyze_conversation_job( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. - - :param body: The input for the analyze conversations operation. Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ - ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - @api_version_validation( - method_added_on="2023-04-01", - params_added_on={"2023-04-01": ["api_version", "content_type", "accept"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], - ) - async def begin_analyze_conversation_job( # type: ignore[override] - self, body: Union[AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any - ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: - """Analyzes the input conversation utterance. 
- - :param body: The input for the analyze conversations operation. Required. - :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput or JSON or IO[bytes] - :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. - :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ - ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - polling: Union[bool, AsyncPollingMethod[AsyncItemPaged["ConversationActions"]]] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - cls = kwargs.pop("cls", None) # optional custom deserializer - kwargs.pop("error_map", None) - - path_format_arguments = { - "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - # ---- paging helpers (state -> AsyncItemPaged[ConversationActions]) - - async def _fetch_state_by_next_link(next_link: str) -> AnalyzeConversationOperationState: - req = HttpRequest("GET", next_link) - resp = await self._client.send_request(req) # type: ignore[attr-defined] - if resp.status_code != 200: - raise HttpResponseError(response=resp) - await resp.read() - data = json.loads(resp.text()) - return AnalyzeConversationOperationState(data) - - def _build_pager_from_state(state: AnalyzeConversationOperationState) -> AsyncItemPaged["ConversationActions"]: - async def extract_data(s: AnalyzeConversationOperationState): - next_link = s.next_link - actions: ConversationActions = s.actions - return next_link, [actions] - - async def get_next(token: Optional[str]) -> Optional[AnalyzeConversationOperationState]: - if token is None: - return state - if not token: - return None - return await _fetch_state_by_next_link(token) - - return AsyncItemPaged(get_next, extract_data) - - # holder to let the deserializer set poller._last_state - poller_holder: Dict[str, AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]] = {} - - # ---- deserializer: final HTTP -> AsyncItemPaged[ConversationActions] - def get_long_running_output(pipeline_response): - final = pipeline_response.http_response - if final.status_code == 200: - data = json.loads(final.text()) - op_state = AnalyzeConversationOperationState(data) - - poller_ref = poller_holder["poller"] - poller_ref._record_state_for_details(op_state) # pylint:disable=protected-access - - paged = _build_pager_from_state(op_state) - return cls(pipeline_response, paged, {}) if cls else paged - raise HttpResponseError(response=final) - - # ---- polling method - if polling is True: - polling_method: AsyncPollingMethod[AsyncItemPaged["ConversationActions"]] = cast( - AsyncPollingMethod[AsyncItemPaged["ConversationActions"]], - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod[AsyncItemPaged["ConversationActions"]], AsyncNoPolling()) - else: - polling_method = cast(AsyncPollingMethod[AsyncItemPaged["ConversationActions"]], polling) - - # ---- resume path - if cont_token: - return 
AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - ) - - # ---- submit job - raw_result = await self._analyze_conversation_job_initial( - body=body, - content_type=content_type, - cls=lambda x, y, z: x, # passthrough - headers=_headers, - params=_params, - **kwargs, - ) - # buffer initial body so .text() is available later - await raw_result.http_response.read() # type: ignore[attr-defined] - - # ---- build custom async poller - lro: AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]] = ( - AnalyzeConversationAsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) - ) - poller_holder["poller"] = lro - return lro +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -319,6 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -__all__ = ["ConversationAnalysisClient", "AnalyzeConversationAsyncLROPoller"] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py index 04430a5a96f8..f3fb03c3482d 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py @@ -75,6 +75,10 @@ KnowledgeBaseAnswerPrompt, LengthResolution, ListKey, + LuisCallingConfig, + LuisConfig, + LuisResult, + LuisTargetIntentResult, MetadataFilter, MetadataRecord, NamedEntity, @@ -205,6 +209,10 @@ "KnowledgeBaseAnswerPrompt", "LengthResolution", "ListKey", + "LuisCallingConfig", + "LuisConfig", + "LuisResult", + "LuisTargetIntentResult", "MetadataFilter", "MetadataRecord", "NamedEntity", diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_enums.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_enums.py index 738da186c5f6..f83b15e45e62 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_enums.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_enums.py @@ -186,6 +186,48 @@ class ConversationPiiCategories(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Phone category""" US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber" """US social security number category""" + DRIVERS_LICENSE_NUMBER = "DriversLicenseNumber" + """Driver's license number category""" + PASSPORT_NUMBER = "PassportNumber" + """Passport number category""" + PERSON_TYPE = "PersonType" + """Person type category""" + ORGANIZATION = "Organization" + """Organization category""" + ABA_ROUTING_NUMBER = "ABARoutingNumber" + """ABA routing number category""" + BANK_ACCOUNT_NUMBER = "BankAccountNumber" + """Bank account number category""" + DATE_OF_BIRTH = "DateOfBirth" + """Date of birth category""" + INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber" + """International Bank Account Number category""" + SWIFT_CODE = "SWIFTCode" + """SWIFT code category""" + VEHICLE_IDENTIFICATION_NUMBER = "VehicleIdentificationNumber" + """Vehicle identification number category""" + AGE = "Age" + """Age category""" 
+ DATE = "Date" + """Date category""" + ZIP_CODE = "ZipCode" + """Zip code category""" + GOVERNMENT_ISSUED_ID = "GovernmentIssuedId" + """Government issued ID category""" + CVV = "CVV" + """Card verification value category""" + HEALTH_CARD_NUMBER = "HealthCardNumber" + """Health card number category""" + CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber" + """CA Social insurance number category""" + US_MEDICARE_BENEFICIARY_ID = "USMedicareBeneficiaryId" + """US Medicare beneficiary ID category""" + GITHUB_ACCOUNT = "GithubAccount" + """Github account category""" + LOCATION = "Location" + """Location category""" + GPE = "GPE" + """GPE category""" ALL = "All" """All categories""" DEFAULT = "Default" @@ -211,6 +253,48 @@ class ConversationPiiCategoryExclusions(str, Enum, metaclass=CaseInsensitiveEnum """Phone category""" US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber" """US social security number category""" + DRIVERS_LICENSE_NUMBER = "DriversLicenseNumber" + """Driver's license number category""" + PASSPORT_NUMBER = "PassportNumber" + """Passport number category""" + PERSON_TYPE = "PersonType" + """Person type category""" + ORGANIZATION = "Organization" + """Organization category""" + ABA_ROUTING_NUMBER = "ABARoutingNumber" + """ABA routing number category""" + BANK_ACCOUNT_NUMBER = "BankAccountNumber" + """Bank account number category""" + DATE_OF_BIRTH = "DateOfBirth" + """Date of birth category""" + INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber" + """International Bank Account Number category""" + SWIFT_CODE = "SWIFTCode" + """SWIFT code category""" + VEHICLE_IDENTIFICATION_NUMBER = "VehicleIdentificationNumber" + """Vehicle identification number category""" + AGE = "Age" + """Age category""" + DATE = "Date" + """Date category""" + ZIP_CODE = "ZipCode" + """Zip code category""" + GOVERNMENT_ISSUED_ID = "GovernmentIssuedId" + """Government issued ID category""" + CVV = "CVV" + """Card verification value category""" + HEALTH_CARD_NUMBER = "HealthCardNumber" + """Health card number category""" + CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber" + """CA Social insurance number category""" + US_MEDICARE_BENEFICIARY_ID = "USMedicareBeneficiaryId" + """US Medicare beneficiary ID category""" + GITHUB_ACCOUNT = "GithubAccount" + """Github account category""" + LOCATION = "Location" + """Location category""" + GPE = "GPE" + """GPE category""" class DateTimeSubKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py index a0806cf3cae6..23adc29d0b66 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py @@ -9,7 +9,7 @@ # pylint: disable=useless-super-delegation import datetime -from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload +from typing import Any, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload from .._utils.model_base import Model as _Model, rest_discriminator, rest_field from ._enums import ( @@ -46,7 +46,7 @@ class ResolutionBase(_Model): :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} 
resolution_kind: str = rest_discriminator( name="resolutionKind", visibility=["read", "create", "update", "delete", "query"] ) @@ -111,7 +111,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.AGE_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.AGE_RESOLUTION # type: ignore class AnalysisConfig(_Model): @@ -119,7 +120,7 @@ class AnalysisConfig(_Model): services. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ConversationConfig, QuestionAnsweringConfig + ConversationConfig, LuisConfig, QuestionAnsweringConfig :ivar target_project_kind: The type of a target service. Required. Known values are: "Luis", "Conversation", "QuestionAnswering", and "NonLinked". @@ -128,7 +129,7 @@ class AnalysisConfig(_Model): :vartype api_version: str """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} target_project_kind: str = rest_discriminator( name="targetProjectKind", visibility=["read", "create", "update", "delete", "query"] ) @@ -169,7 +170,7 @@ class AnalyzeConversationActionResult(_Model): :vartype kind: str or ~azure.ai.language.conversations.models.AnalyzeConversationResultKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """The base class of a conversation input task result. Required. Known values are: \"ConversationResult\" and \"ConversationalAIResult\".""" @@ -203,7 +204,7 @@ class AnalyzeConversationInput(_Model): :vartype kind: str or ~azure.ai.language.conversations.models.AnalyzeConversationInputKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} kind: str = rest_discriminator(name="kind", visibility=["read", "create", "query"]) """The base class of a conversation input task. Required. Known values are: \"Conversation\" and \"ConversationalAI\".""" @@ -241,7 +242,7 @@ class AnalyzeConversationOperationAction(_Model): ~azure.ai.language.conversations.models.AnalyzeConversationOperationActionKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} name: Optional[str] = rest_field(name="taskName", visibility=["read", "create", "update", "delete", "query"]) """task name.""" kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) @@ -292,7 +293,7 @@ class AnalyzeConversationOperationInput(_Model): name="analysisInput", visibility=["read", "create", "query"] ) """Analysis Input. Required.""" - actions: List["_models._models.AnalyzeConversationOperationAction"] = rest_field( + actions: list["_models._models.AnalyzeConversationOperationAction"] = rest_field( name="tasks", visibility=["read", "create", "update", "delete", "query"] ) """Set of tasks to execute on the input conversation. Required.""" @@ -306,7 +307,7 @@ def __init__( self, *, conversation_input: "_models._models.MultiLanguageConversationInput", - actions: List["_models._models.AnalyzeConversationOperationAction"], + actions: list["_models._models.AnalyzeConversationOperationAction"], display_name: Optional[str] = None, cancel_after: Optional[float] = None, ) -> None: ... 
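As a sketch of how the operation input touched in the hunk above is assembled, assuming the caller has already built a MultiLanguageConversationInput and a list of operation actions; only the keyword names visible in the __init__ overload above are used, and the display name and cancel_after value are illustrative.

from azure.ai.language.conversations.models import AnalyzeConversationOperationInput


def build_operation_input(conversation_input, actions):
    # conversation_input: a MultiLanguageConversationInput assembled by the caller.
    # actions: a list of AnalyzeConversationOperationAction subclass instances.
    return AnalyzeConversationOperationInput(
        conversation_input=conversation_input,
        actions=actions,
        display_name="analyze-conversations-sample",  # optional, illustrative name
        cancel_after=3600.0,                          # optional; unit not stated in this hunk
    )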
@@ -344,7 +345,7 @@ class AnalyzeConversationOperationResult(_Model): ~azure.ai.language.conversations.models.AnalyzeConversationOperationResultsKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} last_update_date_time: datetime.datetime = rest_field( name="lastUpdateDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" ) @@ -433,7 +434,7 @@ class AnalyzeConversationOperationState(_Model): ) """status. Required. Known values are: \"notStarted\", \"running\", \"succeeded\", \"partiallyCompleted\", \"failed\", \"cancelled\", and \"cancelling\".""" - errors: Optional[List["_models.ConversationError"]] = rest_field( + errors: Optional[list["_models.ConversationError"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """errors.""" @@ -459,7 +460,7 @@ def __init__( actions: "_models.ConversationActions", display_name: Optional[str] = None, expiration_date_time: Optional[datetime.datetime] = None, - errors: Optional[List["_models.ConversationError"]] = None, + errors: Optional[list["_models.ConversationError"]] = None, next_link: Optional[str] = None, statistics: Optional["_models.ConversationRequestStatistics"] = None, ) -> None: ... @@ -568,7 +569,7 @@ class AnswersResult(_Model): :vartype answers: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswer] """ - answers: Optional[List["_models.KnowledgeBaseAnswer"]] = rest_field( + answers: Optional[list["_models.KnowledgeBaseAnswer"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Represents Answer Result list.""" @@ -577,7 +578,7 @@ class AnswersResult(_Model): def __init__( self, *, - answers: Optional[List["_models.KnowledgeBaseAnswer"]] = None, + answers: Optional[list["_models.KnowledgeBaseAnswer"]] = None, ) -> None: ... @overload @@ -632,7 +633,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.AREA_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.AREA_RESOLUTION # type: ignore class AudioTiming(_Model): @@ -679,7 +681,7 @@ class BaseRedactionPolicy(_Model): :vartype policy_kind: str or ~azure.ai.language.conversations.models.RedactionPolicyKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} policy_kind: str = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) """The entity RedactionPolicy object kind. Required. 
Known values are: \"noMask\", \"characterMask\", and \"entityMask\".""" @@ -732,7 +734,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.BOOLEAN_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.BOOLEAN_RESOLUTION # type: ignore class CharacterMaskPolicyType(BaseRedactionPolicy, discriminator="characterMask"): @@ -774,7 +777,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, policy_kind=RedactionPolicyKind.CHARACTER_MASK, **kwargs) + super().__init__(*args, **kwargs) + self.policy_kind = RedactionPolicyKind.CHARACTER_MASK # type: ignore class ConversationActionContent(_Model): @@ -822,7 +826,7 @@ class ConversationActionContent(_Model): name="directTarget", visibility=["read", "create", "update", "delete", "query"] ) """The name of a target project to forward the request to.""" - target_project_parameters: Optional[Dict[str, "_models.AnalysisConfig"]] = rest_field( + target_project_parameters: Optional[dict[str, "_models.AnalysisConfig"]] = rest_field( name="targetProjectParameters", visibility=["read", "create", "update", "delete", "query"] ) """A dictionary representing the parameters for each target project.""" @@ -837,7 +841,7 @@ def __init__( is_logging_enabled: Optional[bool] = None, string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, direct_target: Optional[str] = None, - target_project_parameters: Optional[Dict[str, "_models.AnalysisConfig"]] = None, + target_project_parameters: Optional[dict[str, "_models.AnalysisConfig"]] = None, ) -> None: ... @overload @@ -880,7 +884,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationResultKind.CONVERSATION_RESULT, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationResultKind.CONVERSATION_RESULT # type: ignore class ConversationActions(_Model): @@ -908,7 +913,7 @@ class ConversationActions(_Model): """Count of tasks that are currently in progress. Required.""" total: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Total count of tasks submitted as part of the job. Required.""" - task_results: Optional[List["_models.AnalyzeConversationOperationResult"]] = rest_field( + task_results: Optional[list["_models.AnalyzeConversationOperationResult"]] = rest_field( name="items", visibility=["read", "create", "update", "delete", "query"] ) """List of results from tasks (if available).""" @@ -921,7 +926,7 @@ def __init__( failed: int, in_progress: int, total: int, - task_results: Optional[List["_models.AnalyzeConversationOperationResult"]] = None, + task_results: Optional[list["_models.AnalyzeConversationOperationResult"]] = None, ) -> None: ... @overload @@ -994,11 +999,11 @@ class ConversationalAIAnalysis(_Model): id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the conversation. Required.""" - intents: List["_models.ConversationalAIIntent"] = rest_field( + intents: list["_models.ConversationalAIIntent"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The intent classification results for this conversation. 
Required.""" - entities: Optional[List["_models.ConversationalAIEntity"]] = rest_field( + entities: Optional[list["_models.ConversationalAIEntity"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Global entities that are matched but not associated with any specific intent.""" @@ -1008,8 +1013,8 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - intents: List["_models.ConversationalAIIntent"], - entities: Optional[List["_models.ConversationalAIEntity"]] = None, + intents: list["_models.ConversationalAIIntent"], + entities: Optional[list["_models.ConversationalAIEntity"]] = None, ) -> None: ... @overload @@ -1030,7 +1035,7 @@ class ConversationalAIAnalysisInput(_Model): :vartype conversations: list[~azure.ai.language.conversations.models.TextConversation] """ - conversations: List["_models.TextConversation"] = rest_field( + conversations: list["_models.TextConversation"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """List of multiple conversations. Required.""" @@ -1039,7 +1044,7 @@ class ConversationalAIAnalysisInput(_Model): def __init__( self, *, - conversations: List["_models.TextConversation"], + conversations: list["_models.TextConversation"], ) -> None: ... @overload @@ -1097,11 +1102,11 @@ class ConversationalAIEntity(_Model): name="conversationItemIndex", visibility=["read", "create", "update", "delete", "query"] ) """The index of the conversation item where the entity appears.""" - resolutions: Optional[List["_models.ResolutionBase"]] = rest_field( + resolutions: Optional[list["_models.ResolutionBase"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Entity resolution details, if available.""" - extra_information: Optional[List["_models.ConversationEntityExtraInformation"]] = rest_field( + extra_information: Optional[list["_models.ConversationEntityExtraInformation"]] = rest_field( name="extraInformation", visibility=["read", "create", "update", "delete", "query"] ) """Additional entity metadata.""" @@ -1117,8 +1122,8 @@ def __init__( length: int, conversation_item_id: str, conversation_item_index: Optional[int] = None, - resolutions: Optional[List["_models.ResolutionBase"]] = None, - extra_information: Optional[List["_models.ConversationEntityExtraInformation"]] = None, + resolutions: Optional[list["_models.ResolutionBase"]] = None, + extra_information: Optional[list["_models.ConversationEntityExtraInformation"]] = None, ) -> None: ... @overload @@ -1151,11 +1156,11 @@ class ConversationalAIIntent(_Model): """The name of the detected intent. Required.""" type: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The type of intent, either \"action\" or \"question\". Required.""" - conversation_item_ranges: List["_models.ConversationItemRange"] = rest_field( + conversation_item_ranges: list["_models.ConversationItemRange"] = rest_field( name="conversationItemRanges", visibility=["read", "create", "update", "delete", "query"] ) """The ranges of conversation items where this intent was identified. Required.""" - entities: List["_models.ConversationalAIEntity"] = rest_field( + entities: list["_models.ConversationalAIEntity"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The entities associated with this intent. 
Required.""" @@ -1166,8 +1171,8 @@ def __init__( *, name: str, type: str, - conversation_item_ranges: List["_models.ConversationItemRange"], - entities: List["_models.ConversationalAIEntity"], + conversation_item_ranges: list["_models.ConversationItemRange"], + entities: list["_models.ConversationalAIEntity"], ) -> None: ... @overload @@ -1190,19 +1195,19 @@ class ConversationalAIResult(_Model): :vartype warnings: list[str] """ - conversations: List["_models.ConversationalAIAnalysis"] = rest_field( + conversations: list["_models.ConversationalAIAnalysis"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Multiple multi-turn conversations analyzed. Required.""" - warnings: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + warnings: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Any warnings encountered during processing.""" @overload def __init__( self, *, - conversations: List["_models.ConversationalAIAnalysis"], - warnings: Optional[List[str]] = None, + conversations: list["_models.ConversationalAIAnalysis"], + warnings: Optional[list[str]] = None, ) -> None: ... @overload @@ -1255,7 +1260,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationInputKind.CONVERSATIONAL_AI, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationInputKind.CONVERSATIONAL_AI # type: ignore class ConversationalAITaskResult(AnalyzeConversationActionResult, discriminator="ConversationalAIResult"): @@ -1287,7 +1293,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationResultKind.CONVERSATIONAL_AI_RESULT, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationResultKind.CONVERSATIONAL_AI_RESULT # type: ignore class ConversationalPiiResult(_Model): @@ -1307,14 +1314,14 @@ class ConversationalPiiResult(_Model): id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Unique, non-empty conversation identifier. Required.""" - warnings: List["_models.InputWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + warnings: list["_models.InputWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Warnings encountered in processing the document. Required.""" statistics: Optional["_models.ConversationStatistics"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """If showStats=true was specified in the request this field will contain information about the conversation payload.""" - conversation_items: List["_models.ConversationPiiItemResult"] = rest_field( + conversation_items: list["_models.ConversationPiiItemResult"] = rest_field( name="conversationItems", visibility=["read", "create", "update", "delete", "query"] ) """List of conversationItems. Required.""" @@ -1324,8 +1331,8 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - warnings: List["_models.InputWarning"], - conversation_items: List["_models.ConversationPiiItemResult"], + warnings: list["_models.InputWarning"], + conversation_items: list["_models.ConversationPiiItemResult"], statistics: Optional["_models.ConversationStatistics"] = None, ) -> None: ... 
@@ -1449,7 +1456,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, target_project_kind=TargetProjectKind.CONVERSATION, **kwargs) + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.CONVERSATION # type: ignore class ConversationEntity(_Model): @@ -1482,11 +1490,11 @@ class ConversationEntity(_Model): """The length of the text. Required.""" confidence: float = rest_field(name="confidenceScore", visibility=["read", "create", "update", "delete", "query"]) """The entity confidence score. Required.""" - resolutions: Optional[List["_models.ResolutionBase"]] = rest_field( + resolutions: Optional[list["_models.ResolutionBase"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The collection of entity resolution objects.""" - extra_information: Optional[List["_models.ConversationEntityExtraInformation"]] = rest_field( + extra_information: Optional[list["_models.ConversationEntityExtraInformation"]] = rest_field( name="extraInformation", visibility=["read", "create", "update", "delete", "query"] ) """The collection of entity extra information objects.""" @@ -1500,8 +1508,8 @@ def __init__( offset: int, length: int, confidence: float, - resolutions: Optional[List["_models.ResolutionBase"]] = None, - extra_information: Optional[List["_models.ConversationEntityExtraInformation"]] = None, + resolutions: Optional[list["_models.ResolutionBase"]] = None, + extra_information: Optional[list["_models.ConversationEntityExtraInformation"]] = None, ) -> None: ... @overload @@ -1527,7 +1535,7 @@ class ConversationEntityExtraInformation(_Model): ~azure.ai.language.conversations.models.ExtraInformationKind """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} extra_information_kind: str = rest_discriminator( name="extraInformationKind", visibility=["read", "create", "update", "delete", "query"] ) @@ -1586,7 +1594,7 @@ class ConversationError(_Model): """A human-readable representation of the error. Required.""" target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The target of the error.""" - details: Optional[List["_models.ConversationError"]] = rest_field( + details: Optional[list["_models.ConversationError"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """An array of details about specific errors that led to this reported error.""" @@ -1602,7 +1610,7 @@ def __init__( code: Union[str, "_models.ConversationErrorCode"], message: str, target: Optional[str] = None, - details: Optional[List["_models.ConversationError"]] = None, + details: Optional[list["_models.ConversationError"]] = None, innererror: Optional["_models.InnerErrorModel"] = None, ) -> None: ... @@ -1634,7 +1642,7 @@ class ConversationInput(_Model): :vartype domain: str or ~azure.ai.language.conversations.models.ConversationDomain """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Unique identifier for the conversation. 
Required.""" language: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -1805,7 +1813,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationInputKind.CONVERSATION, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationInputKind.CONVERSATION # type: ignore class ConversationPiiActionContent(_Model): @@ -1848,7 +1857,7 @@ class ConversationPiiActionContent(_Model): name="modelVersion", visibility=["read", "create", "update", "delete", "query"] ) """model version.""" - pii_categories: Optional[List[Union[str, "_models._enums.ConversationPiiCategories"]]] = rest_field( + pii_categories: Optional[list[Union[str, "_models._enums.ConversationPiiCategories"]]] = rest_field( name="piiCategories", visibility=["read", "create", "update", "delete", "query"] ) """Array of ConversationPIICategories.""" @@ -1871,7 +1880,7 @@ class ConversationPiiActionContent(_Model): character will be * as before. We allow specific ascii characters for redaction. Known values are: \"!\", \"#\", \"$\", \"%\", \"&\", \"*\", \"+\", \"-\", \"=\", \"?\", \"@\", \"^\", \"_\", and \"~\".""" - exclude_pii_categories: Optional[List[Union[str, "_models._enums.ConversationPiiCategoryExclusions"]]] = rest_field( + exclude_pii_categories: Optional[list[Union[str, "_models._enums.ConversationPiiCategoryExclusions"]]] = rest_field( name="excludePiiCategories", visibility=["read", "create", "update", "delete", "query"] ) """List of categories that need to be excluded instead of included.""" @@ -1886,11 +1895,11 @@ def __init__( *, logging_opt_out: Optional[bool] = None, model_version: Optional[str] = None, - pii_categories: Optional[List[Union[str, "_models._enums.ConversationPiiCategories"]]] = None, + pii_categories: Optional[list[Union[str, "_models._enums.ConversationPiiCategories"]]] = None, redact_audio_timing: Optional[bool] = None, redaction_source: Optional[Union[str, "_models._enums.TranscriptContentType"]] = None, redaction_character: Optional[Union[str, "_models._enums.RedactionCharacter"]] = None, - exclude_pii_categories: Optional[List[Union[str, "_models._enums.ConversationPiiCategoryExclusions"]]] = None, + exclude_pii_categories: Optional[list[Union[str, "_models._enums.ConversationPiiCategoryExclusions"]]] = None, redaction_policy: Optional["_models._models.BaseRedactionPolicy"] = None, ) -> None: ... @@ -1924,7 +1933,7 @@ class ConversationPiiItemResult(_Model): ) """Transcript content response that the service generates, with all necessary personally identifiable information redacted. Required.""" - entities: List["_models.NamedEntity"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + entities: list["_models.NamedEntity"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Array of Entities. Required.""" @overload @@ -1933,7 +1942,7 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin redacted_content: "_models.RedactedTranscriptContent", - entities: List["_models.NamedEntity"], + entities: list["_models.NamedEntity"], ) -> None: ... 
@overload @@ -1988,7 +1997,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationOperationResultsKind.PII_OPERATION_RESULTS, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationOperationResultsKind.PII_OPERATION_RESULTS # type: ignore class ConversationPiiResults(_Model): @@ -2004,7 +2014,7 @@ class ConversationPiiResults(_Model): :vartype conversations: list[~azure.ai.language.conversations.models.ConversationalPiiResult] """ - errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + errors: list["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Errors by document id. Required.""" statistics: Optional["_models.RequestStatistics"] = rest_field( visibility=["read", "create", "update", "delete", "query"] @@ -2012,7 +2022,7 @@ class ConversationPiiResults(_Model): """statistics.""" model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) """This field indicates which model is used for scoring. Required.""" - conversations: List["_models.ConversationalPiiResult"] = rest_field( + conversations: list["_models.ConversationalPiiResult"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """array of conversations. Required.""" @@ -2021,9 +2031,9 @@ class ConversationPiiResults(_Model): def __init__( self, *, - errors: List["_models.DocumentError"], + errors: list["_models.DocumentError"], model_version: str, - conversations: List["_models.ConversationalPiiResult"], + conversations: list["_models.ConversationalPiiResult"], statistics: Optional["_models.RequestStatistics"] = None, ) -> None: ... @@ -2051,7 +2061,7 @@ class PredictionBase(_Model): :vartype top_intent: str """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} project_kind: str = rest_discriminator( name="projectKind", visibility=["read", "create", "update", "delete", "query"] ) @@ -2095,9 +2105,9 @@ class ConversationPrediction(PredictionBase, discriminator="Conversation"): project_kind: Literal[ProjectKind.CONVERSATION] = rest_discriminator(name="projectKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Represents the prediction section of a Conversation project. Required. Conversation type""" - intents: List["_models.ConversationIntent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + intents: list["_models.ConversationIntent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The intent classification results. Required.""" - entities: List["_models.ConversationEntity"] = rest_field( + entities: list["_models.ConversationEntity"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The entity extraction results. Required.""" @@ -2106,8 +2116,8 @@ class ConversationPrediction(PredictionBase, discriminator="Conversation"): def __init__( self, *, - intents: List["_models.ConversationIntent"], - entities: List["_models.ConversationEntity"], + intents: list["_models.ConversationIntent"], + entities: list["_models.ConversationEntity"], top_intent: Optional[str] = None, ) -> None: ... 
@@ -2119,7 +2129,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, project_kind=ProjectKind.CONVERSATION, **kwargs) + super().__init__(*args, **kwargs) + self.project_kind = ProjectKind.CONVERSATION # type: ignore class ConversationRequestStatistics(_Model): @@ -2260,14 +2271,14 @@ class ConversationsSummaryResult(_Model): id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Unique, non-empty conversation identifier. Required.""" - warnings: List["_models.InputWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + warnings: list["_models.InputWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Warnings encountered in processing the document. Required.""" statistics: Optional["_models.ConversationStatistics"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """If showStats=true was specified in the request this field will contain information about the conversation payload.""" - summaries: List["_models.SummaryResultItem"] = rest_field( + summaries: list["_models.SummaryResultItem"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """array of summaries. Required.""" @@ -2277,8 +2288,8 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - warnings: List["_models.InputWarning"], - summaries: List["_models.SummaryResultItem"], + warnings: list["_models.InputWarning"], + summaries: list["_models.SummaryResultItem"], statistics: Optional["_models.ConversationStatistics"] = None, ) -> None: ... @@ -2368,7 +2379,7 @@ class ConversationSummarizationActionContent(_Model): ) """(NOTE: Recommended to use summaryLength over sentenceCount) Controls the approximate length of the output summaries. Known values are: \"short\", \"medium\", and \"long\".""" - summary_aspects: List[Union[str, "_models.SummaryAspect"]] = rest_field( + summary_aspects: list[Union[str, "_models.SummaryAspect"]] = rest_field( name="summaryAspects", visibility=["read", "create", "update", "delete", "query"] ) """Array of Summary Aspects. Required.""" @@ -2380,7 +2391,7 @@ class ConversationSummarizationActionContent(_Model): def __init__( self, *, - summary_aspects: List[Union[str, "_models.SummaryAspect"]], + summary_aspects: list[Union[str, "_models.SummaryAspect"]], logging_opt_out: Optional[bool] = None, model_version: Optional[str] = None, sentence_count: Optional[int] = None, @@ -2404,7 +2415,7 @@ class TargetIntentResult(_Model): """This is the base class of an intent prediction. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ConversationTargetIntentResult, NonLinkedTargetIntentResult, + ConversationTargetIntentResult, LuisTargetIntentResult, NonLinkedTargetIntentResult, QuestionAnsweringTargetIntentResult :ivar target_project_kind: This is the base class of an intent prediction. Required. 
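To tie the expanded PII category enums earlier in this diff to the piiCategories/excludePiiCategories fields shown above, a hedged construction sketch; it assumes ConversationPiiActionContent and the category enum are exported from the public models namespace, and it uses only keyword names visible in the __init__ overload above.

from azure.ai.language.conversations.models import (
    ConversationPiiActionContent,
    ConversationPiiCategories,
)

pii_content = ConversationPiiActionContent(
    pii_categories=[
        ConversationPiiCategories.DATE_OF_BIRTH,  # newly added category
        ConversationPiiCategories.SWIFT_CODE,     # newly added category
    ],
    redaction_character="*",  # one of the documented ASCII redaction characters
    # exclude_pii_categories=[...] is the mirror option for opting categories out.
)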
Known @@ -2416,7 +2427,7 @@ class TargetIntentResult(_Model): :vartype confidence: float """ - __mapping__: Dict[str, _Model] = {} + __mapping__: dict[str, _Model] = {} target_project_kind: str = rest_discriminator( name="targetProjectKind", visibility=["read", "create", "update", "delete", "query"] ) @@ -2487,7 +2498,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, target_project_kind=TargetProjectKind.CONVERSATION, **kwargs) + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.CONVERSATION # type: ignore class CurrencyResolution(ResolutionBase, discriminator="CurrencyResolution"): @@ -2536,7 +2548,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.CURRENCY_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.CURRENCY_RESOLUTION # type: ignore class CustomConversationSummarizationActionContent(_Model): # pylint: disable=name-too-long @@ -2582,7 +2595,7 @@ class CustomConversationSummarizationActionContent(_Model): # pylint: disable=n ) """Controls the approximate length of the output summaries. Recommended to use summaryLength over sentenceCount. Known values are: \"short\", \"medium\", and \"long\".""" - summary_aspects: List[Union[str, "_models.SummaryAspect"]] = rest_field( + summary_aspects: list[Union[str, "_models.SummaryAspect"]] = rest_field( name="summaryAspects", visibility=["read", "create", "update", "delete", "query"] ) """Array of Summary Aspects. Required.""" @@ -2593,7 +2606,7 @@ def __init__( *, project_name: str, deployment_name: str, - summary_aspects: List[Union[str, "_models.SummaryAspect"]], + summary_aspects: list[Union[str, "_models.SummaryAspect"]], logging_opt_out: Optional[bool] = None, sentence_count: Optional[int] = None, string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, @@ -2649,9 +2662,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__( - *args, kind=AnalyzeConversationOperationActionKind.CUSTOM_CONVERSATIONAL_SUMMARIZATION_TASK, **kwargs - ) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationOperationActionKind.CUSTOM_CONVERSATIONAL_SUMMARIZATION_TASK # type: ignore class CustomSummarizationOperationResult( @@ -2697,9 +2709,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__( - *args, kind=AnalyzeConversationOperationResultsKind.CUSTOM_SUMMARIZATION_OPERATION_RESULTS, **kwargs - ) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationOperationResultsKind.CUSTOM_SUMMARIZATION_OPERATION_RESULTS # type: ignore class CustomSummaryResult(_Model): @@ -2719,11 +2730,11 @@ class CustomSummaryResult(_Model): :vartype deployment_name: str """ - conversations: List["_models.ConversationsSummaryResult"] = rest_field( + conversations: list["_models.ConversationsSummaryResult"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """array of conversations. Required.""" - errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + errors: list["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Errors by document id. 
Required.""" statistics: Optional["_models.RequestStatistics"] = rest_field( visibility=["read", "create", "update", "delete", "query"] @@ -2739,8 +2750,8 @@ class CustomSummaryResult(_Model): def __init__( self, *, - conversations: List["_models.ConversationsSummaryResult"], - errors: List["_models.DocumentError"], + conversations: list["_models.ConversationsSummaryResult"], + errors: list["_models.DocumentError"], project_name: str, deployment_name: str, statistics: Optional["_models.RequestStatistics"] = None, @@ -2820,7 +2831,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.DATE_TIME_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.DATE_TIME_RESOLUTION # type: ignore class DocumentError(_Model): @@ -2880,7 +2892,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, policy_kind=RedactionPolicyKind.ENTITY_MASK, **kwargs) + super().__init__(*args, **kwargs) + self.policy_kind = RedactionPolicyKind.ENTITY_MASK # type: ignore class EntitySubtype(ConversationEntityExtraInformation, discriminator="EntitySubtype"): @@ -2900,7 +2913,7 @@ class EntitySubtype(ConversationEntityExtraInformation, discriminator="EntitySub """The extra information object kind. Required. Entity subtype extra information kind""" value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The Subtype of an extracted entity type.""" - tags: Optional[List["_models.EntityTag"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + tags: Optional[list["_models.EntityTag"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of entity tags. Tags express similarities between entity categories for the extracted entity type.""" @@ -2909,7 +2922,7 @@ def __init__( self, *, value: Optional[str] = None, - tags: Optional[List["_models.EntityTag"]] = None, + tags: Optional[list["_models.EntityTag"]] = None, ) -> None: ... @overload @@ -2920,7 +2933,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, extra_information_kind=ExtraInformationKind.ENTITY_SUBTYPE, **kwargs) + super().__init__(*args, **kwargs) + self.extra_information_kind = ExtraInformationKind.ENTITY_SUBTYPE # type: ignore class EntityTag(_Model): @@ -3027,7 +3041,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.INFORMATION_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.INFORMATION_RESOLUTION # type: ignore class InnerErrorModel(_Model): @@ -3062,7 +3077,7 @@ class InnerErrorModel(_Model): \"InvalidDocumentBatch\", \"UnsupportedLanguageCode\", and \"InvalidCountryHint\".""" message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Error message. 
Required.""" - details: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + details: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Error details.""" target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Error target.""" @@ -3077,7 +3092,7 @@ def __init__( *, code: Union[str, "_models.InnerErrorCode"], message: str, - details: Optional[Dict[str, str]] = None, + details: Optional[dict[str, str]] = None, target: Optional[str] = None, innererror: Optional["_models.InnerErrorModel"] = None, ) -> None: ... @@ -3197,7 +3212,7 @@ class KnowledgeBaseAnswer(_Model): :vartype short_answer: ~azure.ai.language.conversations.models.AnswerSpan """ - questions: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + questions: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of questions associated with the answer.""" answer: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Answer text.""" @@ -3209,7 +3224,7 @@ class KnowledgeBaseAnswer(_Model): """ID of the QnA result.""" source: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Source of QnA result.""" - metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + metadata: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Metadata associated with the answer, useful to categorize or filter question answers.""" dialog: Optional["_models.KnowledgeBaseAnswerDialog"] = rest_field( visibility=["read", "create", "update", "delete", "query"] @@ -3224,12 +3239,12 @@ class KnowledgeBaseAnswer(_Model): def __init__( self, *, - questions: Optional[List[str]] = None, + questions: Optional[list[str]] = None, answer: Optional[str] = None, confidence: Optional[float] = None, qna_id: Optional[int] = None, source: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, + metadata: Optional[dict[str, str]] = None, dialog: Optional["_models.KnowledgeBaseAnswerDialog"] = None, short_answer: Optional["_models.AnswerSpan"] = None, ) -> None: ... @@ -3297,7 +3312,7 @@ class KnowledgeBaseAnswerDialog(_Model): """To mark if a prompt is relevant only with a previous question or not. If true, do not include this QnA as search result for queries without context; otherwise, if false, ignores context and includes this QnA in search result.""" - prompts: Optional[List["_models.KnowledgeBaseAnswerPrompt"]] = rest_field( + prompts: Optional[list["_models.KnowledgeBaseAnswerPrompt"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """List of prompts associated with the answer.""" @@ -3307,7 +3322,7 @@ def __init__( self, *, is_context_only: Optional[bool] = None, - prompts: Optional[List["_models.KnowledgeBaseAnswerPrompt"]] = None, + prompts: Optional[list["_models.KnowledgeBaseAnswerPrompt"]] = None, ) -> None: ... 
@overload @@ -3403,7 +3418,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.LENGTH_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.LENGTH_RESOLUTION # type: ignore class ListKey(ConversationEntityExtraInformation, discriminator="ListKey"): @@ -3436,7 +3452,156 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, extra_information_kind=ExtraInformationKind.LIST_KEY, **kwargs) + super().__init__(*args, **kwargs) + self.extra_information_kind = ExtraInformationKind.LIST_KEY # type: ignore + + +class LuisCallingConfig(_Model): + """This customizes how the service calls LUIS Generally Available projects. + + :ivar verbose: Enable verbose response. + :vartype verbose: bool + :ivar log: Save log to add in training utterances later. + :vartype log: bool + :ivar show_all_intents: Set true to show all intents. + :vartype show_all_intents: bool + :ivar timezone_offset: The timezone offset for the location of the request. + :vartype timezone_offset: int + :ivar spell_check: Enable spell checking. + :vartype spell_check: bool + :ivar bing_spell_check_subscription_key: The subscription key to use when enabling Bing spell + check. + :vartype bing_spell_check_subscription_key: str + """ + + verbose: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Enable verbose response.""" + log: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Save log to add in training utterances later.""" + show_all_intents: Optional[bool] = rest_field( + name="show-all-intents", visibility=["read", "create", "update", "delete", "query"] + ) + """Set true to show all intents.""" + timezone_offset: Optional[int] = rest_field( + name="timezoneOffset", visibility=["read", "create", "update", "delete", "query"] + ) + """The timezone offset for the location of the request.""" + spell_check: Optional[bool] = rest_field( + name="spellCheck", visibility=["read", "create", "update", "delete", "query"] + ) + """Enable spell checking.""" + bing_spell_check_subscription_key: Optional[str] = rest_field( + name="bing-spell-check-subscription-key", visibility=["read", "create", "update", "delete", "query"] + ) + """The subscription key to use when enabling Bing spell check.""" + + @overload + def __init__( + self, + *, + verbose: Optional[bool] = None, + log: Optional[bool] = None, + show_all_intents: Optional[bool] = None, + timezone_offset: Optional[int] = None, + spell_check: Optional[bool] = None, + bing_spell_check_subscription_key: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LuisConfig(AnalysisConfig, discriminator="Luis"): + """This is a set of request parameters for LUIS Generally Available projects. + + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar target_project_kind: The type of a target service. Required. 
Luis target service type + :vartype target_project_kind: str or ~azure.ai.language.conversations.models.LUIS + :ivar query: The utterance to predict. + :vartype query: str + :ivar calling_options: This customizes how the service calls LUIS Generally Available projects. + :vartype calling_options: ~azure.ai.language.conversations.models.LuisCallingConfig + """ + + target_project_kind: Literal[TargetProjectKind.LUIS] = rest_discriminator(name="targetProjectKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of a target service. Required. Luis target service type""" + query: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The utterance to predict.""" + calling_options: Optional["_models.LuisCallingConfig"] = rest_field( + name="callingOptions", visibility=["read", "create", "update", "delete", "query"] + ) + """This customizes how the service calls LUIS Generally Available projects.""" + + @overload + def __init__( + self, + *, + api_version: Optional[str] = None, + query: Optional[str] = None, + calling_options: Optional["_models.LuisCallingConfig"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.LUIS # type: ignore + + +class LuisResult(_Model): + """It is the response from a LUIS Generally Available application.""" + + +class LuisTargetIntentResult(TargetIntentResult, discriminator="Luis"): + """It is a wrap up of LUIS Generally Available response. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence: The prediction score and it ranges from 0.0 to 1.0. Required. + :vartype confidence: float + :ivar target_project_kind: Kind of the project. Required. Luis target service type + :vartype target_project_kind: str or ~azure.ai.language.conversations.models.LUIS + :ivar result: The actual response from a LUIS Generally Available application. + :vartype result: ~azure.ai.language.conversations.models.LuisResult + """ + + target_project_kind: Literal[TargetProjectKind.LUIS] = rest_discriminator(name="targetProjectKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the project. Required. Luis target service type""" + result: Optional["_models.LuisResult"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The actual response from a LUIS Generally Available application.""" + + @overload + def __init__( + self, + *, + confidence: float, + api_version: Optional[str] = None, + result: Optional["_models.LuisResult"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.LUIS # type: ignore class MetadataFilter(_Model): @@ -3449,7 +3614,7 @@ class MetadataFilter(_Model): :vartype logical_operation: str or ~azure.ai.language.conversations.models.LogicalOperationKind """ - metadata: Optional[List["_models.MetadataRecord"]] = rest_field( + metadata: Optional[list["_models.MetadataRecord"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """List of metadata.""" @@ -3462,7 +3627,7 @@ class MetadataFilter(_Model): def __init__( self, *, - metadata: Optional[List["_models.MetadataRecord"]] = None, + metadata: Optional[list["_models.MetadataRecord"]] = None, logical_operation: Optional[Union[str, "_models.LogicalOperationKind"]] = None, ) -> None: ... @@ -3517,7 +3682,7 @@ class MultiLanguageConversationInput(_Model): :vartype conversations: list[~azure.ai.language.conversations.models.ConversationInput] """ - conversations: List["_models.ConversationInput"] = rest_field( + conversations: list["_models.ConversationInput"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Array of conversation items. Required.""" @@ -3526,7 +3691,7 @@ class MultiLanguageConversationInput(_Model): def __init__( self, *, - conversations: List["_models.ConversationInput"], + conversations: list["_models.ConversationInput"], ) -> None: ... @overload @@ -3643,7 +3808,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, policy_kind=RedactionPolicyKind.NO_MASK, **kwargs) + super().__init__(*args, **kwargs) + self.policy_kind = RedactionPolicyKind.NO_MASK # type: ignore class NonLinkedTargetIntentResult(TargetIntentResult, discriminator="NonLinked"): @@ -3684,7 +3850,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, target_project_kind=TargetProjectKind.NON_LINKED, **kwargs) + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.NON_LINKED # type: ignore class NumberResolution(ResolutionBase, discriminator="NumberResolution"): @@ -3726,7 +3893,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.NUMBER_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.NUMBER_RESOLUTION # type: ignore class NumericRangeResolution(ResolutionBase, discriminator="NumericRangeResolution"): @@ -3776,7 +3944,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.NUMERIC_RANGE_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.NUMERIC_RANGE_RESOLUTION # type: ignore class OrchestrationPrediction(PredictionBase, discriminator="Orchestration"): @@ -3795,7 +3964,7 @@ class OrchestrationPrediction(PredictionBase, discriminator="Orchestration"): project_kind: Literal[ProjectKind.ORCHESTRATION] = rest_discriminator(name="projectKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """This represents the prediction result of an Orchestration project. Required. 
Orchestration type""" - intents: Dict[str, "_models.TargetIntentResult"] = rest_field( + intents: dict[str, "_models.TargetIntentResult"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A dictionary that contains all intents. A key is an intent name and a value is its confidence @@ -3806,7 +3975,7 @@ class OrchestrationPrediction(PredictionBase, discriminator="Orchestration"): def __init__( self, *, - intents: Dict[str, "_models.TargetIntentResult"], + intents: dict[str, "_models.TargetIntentResult"], top_intent: Optional[str] = None, ) -> None: ... @@ -3818,7 +3987,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, project_kind=ProjectKind.ORCHESTRATION, **kwargs) + super().__init__(*args, **kwargs) + self.project_kind = ProjectKind.ORCHESTRATION # type: ignore class OrdinalResolution(ResolutionBase, discriminator="OrdinalResolution"): @@ -3867,7 +4037,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.ORDINAL_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.ORDINAL_RESOLUTION # type: ignore class PiiOperationAction(AnalyzeConversationOperationAction, discriminator="ConversationalPIITask"): @@ -3905,7 +4076,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationOperationActionKind.CONVERSATIONAL_PII_TASK, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationOperationActionKind.CONVERSATIONAL_PII_TASK # type: ignore class QueryFilters(_Model): @@ -3924,7 +4096,7 @@ class QueryFilters(_Model): name="metadataFilter", visibility=["read", "create", "update", "delete", "query"] ) """filters over knowledge base.""" - source_filter: Optional[List[str]] = rest_field( + source_filter: Optional[list[str]] = rest_field( name="sourceFilter", visibility=["read", "create", "update", "delete", "query"] ) """filters over knowledge base.""" @@ -3939,7 +4111,7 @@ def __init__( self, *, metadata_filter: Optional["_models.MetadataFilter"] = None, - source_filter: Optional[List[str]] = None, + source_filter: Optional[list[str]] = None, logical_operation: Optional[Union[str, "_models.LogicalOperationKind"]] = None, ) -> None: ... 
@@ -3990,7 +4162,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, target_project_kind=TargetProjectKind.QUESTION_ANSWERING, **kwargs) + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.QUESTION_ANSWERING # type: ignore class QuestionAnsweringTargetIntentResult(TargetIntentResult, discriminator="QuestionAnswering"): @@ -4030,7 +4203,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, target_project_kind=TargetProjectKind.QUESTION_ANSWERING, **kwargs) + super().__init__(*args, **kwargs) + self.target_project_kind = TargetProjectKind.QUESTION_ANSWERING # type: ignore class QuestionAnswersConfig(_Model): @@ -4146,7 +4320,7 @@ class RedactedTranscriptContent(_Model): """Redacted output for input in text (Microsoft's speech-to-text 'display') format.""" lexical: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Redacted output for input in lexical format.""" - audio_timings: Optional[List["_models.AudioTiming"]] = rest_field( + audio_timings: Optional[list["_models.AudioTiming"]] = rest_field( name="audioTimings", visibility=["read", "create", "update", "delete", "query"] ) """List of redacted audio segments.""" @@ -4159,7 +4333,7 @@ def __init__( masked_inverse_text_normalized: Optional[str] = None, text: Optional[str] = None, lexical: Optional[str] = None, - audio_timings: Optional[List["_models.AudioTiming"]] = None, + audio_timings: Optional[list["_models.AudioTiming"]] = None, ) -> None: ... @overload @@ -4216,7 +4390,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, extra_information_kind=ExtraInformationKind.REGEX_KEY, **kwargs) + super().__init__(*args, **kwargs) + self.extra_information_kind = ExtraInformationKind.REGEX_KEY # type: ignore class RequestStatistics(_Model): @@ -4358,7 +4533,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.SPEED_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.SPEED_RESOLUTION # type: ignore class SummarizationOperationAction(AnalyzeConversationOperationAction, discriminator="ConversationalSummarizationTask"): @@ -4396,7 +4572,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationOperationActionKind.CONVERSATIONAL_SUMMARIZATION_TASK, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationOperationActionKind.CONVERSATIONAL_SUMMARIZATION_TASK # type: ignore class SummarizationOperationResult( @@ -4441,7 +4618,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=AnalyzeConversationOperationResultsKind.SUMMARIZATION_OPERATION_RESULTS, **kwargs) + super().__init__(*args, **kwargs) + self.kind = AnalyzeConversationOperationResultsKind.SUMMARIZATION_OPERATION_RESULTS # type: ignore class SummaryResult(_Model): @@ -4458,11 +4636,11 @@ class SummaryResult(_Model): :vartype model_version: str """ - conversations: List["_models.ConversationsSummaryResult"] = rest_field( + conversations: 
list["_models.ConversationsSummaryResult"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """array of conversations. Required.""" - errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + errors: list["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Errors by document id. Required.""" statistics: Optional["_models.RequestStatistics"] = rest_field( visibility=["read", "create", "update", "delete", "query"] @@ -4475,8 +4653,8 @@ class SummaryResult(_Model): def __init__( self, *, - conversations: List["_models.ConversationsSummaryResult"], - errors: List["_models.DocumentError"], + conversations: list["_models.ConversationsSummaryResult"], + errors: list["_models.DocumentError"], model_version: str, statistics: Optional["_models.RequestStatistics"] = None, ) -> None: ... @@ -4507,7 +4685,7 @@ class SummaryResultItem(_Model): """aspect. Required.""" text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """text. Required.""" - contexts: Optional[List["_models.ItemizedSummaryContext"]] = rest_field( + contexts: Optional[list["_models.ItemizedSummaryContext"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Context list of the summary.""" @@ -4518,7 +4696,7 @@ def __init__( *, aspect: str, text: str, - contexts: Optional[List["_models.ItemizedSummaryContext"]] = None, + contexts: Optional[list["_models.ItemizedSummaryContext"]] = None, ) -> None: ... @overload @@ -4570,7 +4748,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.TEMPERATURE_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.TEMPERATURE_RESOLUTION # type: ignore class TemporalSpanResolution(ResolutionBase, discriminator="TemporalSpanResolution"): @@ -4651,7 +4830,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.TEMPORAL_SPAN_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.TEMPORAL_SPAN_RESOLUTION # type: ignore class TextConversation(ConversationInput, discriminator="text"): @@ -4672,7 +4852,7 @@ class TextConversation(ConversationInput, discriminator="text"): modality: Literal[InputModality.TEXT] = rest_discriminator(name="modality", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """modality discriminator. Required. Text input modality""" - conversation_items: List["_models.TextConversationItem"] = rest_field( + conversation_items: list["_models.TextConversationItem"] = rest_field( name="conversationItems", visibility=["read", "create", "update", "delete", "query"] ) """Ordered list of text conversation items in the conversation. Required.""" @@ -4683,7 +4863,7 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin language: str, - conversation_items: List["_models.TextConversationItem"], + conversation_items: list["_models.TextConversationItem"], domain: Optional[Union[str, "_models.ConversationDomain"]] = None, ) -> None: ... 
@@ -4695,7 +4875,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, modality=InputModality.TEXT, **kwargs) + super().__init__(*args, **kwargs) + self.modality = InputModality.TEXT # type: ignore class TextConversationItem(_Model): @@ -4776,7 +4957,7 @@ class TranscriptConversation(ConversationInput, discriminator="transcript"): modality: Literal[InputModality.TRANSCRIPT] = rest_discriminator(name="modality", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """modality discriminator. Required. Transcript input modality""" - conversation_items: List["_models._models.TranscriptConversationItem"] = rest_field( + conversation_items: list["_models._models.TranscriptConversationItem"] = rest_field( name="conversationItems", visibility=["read", "create", "update", "delete", "query"] ) """Ordered list of transcript conversation items in the conversation. Required.""" @@ -4787,7 +4968,7 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin language: str, - conversation_items: List["_models._models.TranscriptConversationItem"], + conversation_items: list["_models._models.TranscriptConversationItem"], domain: Optional[Union[str, "_models.ConversationDomain"]] = None, ) -> None: ... @@ -4799,7 +4980,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, modality=InputModality.TRANSCRIPT, **kwargs) + super().__init__(*args, **kwargs) + self.modality = InputModality.TRANSCRIPT # type: ignore class TranscriptConversationItem(_Model): @@ -4867,7 +5049,7 @@ class TranscriptConversationItem(_Model): lexical: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Lexical form of the recognized text from the speech-to-text API, with the actual words recognized. Required.""" - word_level_timings: Optional[List["_models._models.WordLevelTiming"]] = rest_field( + word_level_timings: Optional[list["_models._models.WordLevelTiming"]] = rest_field( name="wordLevelTimings", visibility=["read", "create", "update", "delete", "query"] ) """List of word-level audio timing information.""" @@ -4890,7 +5072,7 @@ def __init__( language: Optional[str] = None, modality: Optional[Union[str, "_models.InputModality"]] = None, role: Optional[Union[str, "_models.ParticipantRole"]] = None, - word_level_timings: Optional[List["_models._models.WordLevelTiming"]] = None, + word_level_timings: Optional[list["_models._models.WordLevelTiming"]] = None, conversation_item_level_timing: Optional["_models._models.ConversationItemLevelTiming"] = None, ) -> None: ... 
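Because OrchestrationPrediction.intents is now annotated as dict[str, TargetIntentResult] and LuisTargetIntentResult joins the known target results, callers can branch on the concrete subtype when reading an orchestration response. A sketch under the assumption that `prediction` comes from an analyze_conversation result and that the types are re-exported from the models namespace:

    from azure.ai.language.conversations.models import (
        LuisTargetIntentResult,
        OrchestrationPrediction,
    )

    def describe(prediction: OrchestrationPrediction) -> None:
        # intents maps each orchestration intent name to the result routed to its target project.
        for name, target in prediction.intents.items():
            line = f"{name}: kind={target.target_project_kind}, confidence={target.confidence}"
            if isinstance(target, LuisTargetIntentResult):
                # Raw LUIS GA response, populated only when the request was routed to a LUIS target.
                line = f"{line}, luis_result={target.result}"
            print(line)
        print(f"top intent: {prediction.top_intent}")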
@@ -4949,7 +5131,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.VOLUME_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.VOLUME_RESOLUTION # type: ignore class WeightResolution(ResolutionBase, discriminator="WeightResolution"): @@ -4992,7 +5175,8 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, resolution_kind=ResolutionKind.WEIGHT_RESOLUTION, **kwargs) + super().__init__(*args, **kwargs) + self.resolution_kind = ResolutionKind.WEIGHT_RESOLUTION # type: ignore class WordLevelTiming(_Model): diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py index c4c8835375ca..87676c65a8f0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py @@ -7,19 +7,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from ._models import ( - AnalyzeConversationOperationInput, - MultiLanguageConversationInput, - SummarizationOperationAction, - ConversationSummarizationActionContent, - AnalyzeConversationOperationAction, - ConversationPiiActionContent, - PiiOperationAction, - CharacterMaskPolicyType, - EntityMaskTypePolicyType, - NoMaskPolicyType, -) -from ._enums import RedactionCharacter + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -29,18 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -__all__ = [ - "AnalyzeConversationOperationInput", - "MultiLanguageConversationInput", - "SummarizationOperationAction", - "ConversationSummarizationActionContent", - "AnalyzeConversationOperationAction", - "ConversationPiiActionContent", - "PiiOperationAction", - "CharacterMaskPolicyType", - "RedactionCharacter", - "EntityMaskTypePolicyType", - "NoMaskPolicyType", -] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py new file mode 100644 index 000000000000..da3831d99f2e --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.conversations import ConversationAnalysis + +""" +# PREREQUISITES + pip install azure-ai-language-conversations +# USAGE + python successful_analyze_conversational_ai_task.py +""" + + +def main(): + client = ConversationAnalysis( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_conversation( + body={ + "analysisInput": { + "conversations": [ + { + "conversationItems": [ + {"id": "1", "participantId": "user", "text": "Hi!"}, + {"id": "2", "participantId": "bot", "text": "Hello, how can I help you?"}, + { + "id": "3", + "participantId": "user", + "text": "I would like to book a flight from Paris to Berlin on Oct 10th.", + }, + {"id": "4", "participantId": "bot", "text": "Do you have any airline preference?"}, + {"id": "5", "participantId": "user", "text": "No."}, + {"id": "6", "participantId": "user", "text": "I like New York and Boston."}, + { + "id": "7", + "participantId": "user", + "text": "Actualy, change the destination to New York.", + }, + {"id": "8", "participantId": "user", "text": "Wait, I do not like the food in New York."}, + {"id": "9", "participantId": "user", "text": "Ok, change the destination back to Berlin."}, + ], + "id": "order", + "language": "en", + "modality": "text", + } + ] + }, + "kind": "ConversationalAI", + "parameters": { + "deploymentName": "deployment1", + "projectName": "project1", + "stringIndexType": "Utf16CodeUnit", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationalAITask.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py new file mode 100644 index 000000000000..ccd626af6854 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.conversations import ConversationAnalysis + +""" +# PREREQUISITES + pip install azure-ai-language-conversations +# USAGE + python successful_analyze_conversations.py +""" + + +def main(): + client = ConversationAnalysis( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_conversation( + body={ + "analysisInput": { + "conversationItem": {"id": "1", "participantId": "1", "text": "Book a flight to Seattle on Oct 10th"} + }, + "kind": "Conversation", + "parameters": { + "deploymentName": "{deployment-name}", + "projectName": "{project-name}", + "stringIndexType": "TextElement_V8", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversations.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py new file mode 100644 index 000000000000..bdfe88e0653f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.conversations import ConversationAnalysis + +""" +# PREREQUISITES + pip install azure-ai-language-conversations +# USAGE + python successful_analyze_conversations_arbitration.py +""" + + +def main(): + client = ConversationAnalysis( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_conversation( + body={ + "analysisInput": { + "conversationItem": { + "id": "1", + "language": "en-GB", + "modality": "text", + "participantId": "1", + "text": "How do I integrate QnA Maker and LUIS?", + } + }, + "kind": "Conversation", + "parameters": { + "deploymentName": "{deployment-name}", + "isLoggingEnabled": False, + "projectName": "{project-name}", + "stringIndexType": "TextElement_V8", + "verbose": True, + }, + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationsArbitration.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py new file mode 100644 index 000000000000..030bca19568e --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.conversations import ConversationAnalysis + +""" +# PREREQUISITES + pip install azure-ai-language-conversations +# USAGE + python successful_analyze_conversations_arbitration_direct_target.py +""" + + +def main(): + client = ConversationAnalysis( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_conversation( + body={ + "analysisInput": {"conversationItem": {"id": "1", "participantId": "1", "text": "Ports and connectors"}}, + "kind": "Conversation", + "parameters": { + "deploymentName": "dep1", + "directTarget": "qnaProject", + "projectName": "prj1", + "targetProjectParameters": { + "qnaProject": { + "callingOptions": { + "context": {"previousQnaId": 4, "previousUserQuery": "Meet Surface Pro 4"}, + "question": "App Service overview", + "top": 1, + }, + "targetProjectKind": "QuestionAnswering", + } + }, + }, + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationsArbitrationDirectTarget.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py new file mode 100644 index 000000000000..5555747fd6ab --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.conversations import ConversationAnalysis + +""" +# PREREQUISITES + pip install azure-ai-language-conversations +# USAGE + python successful_analyze_conversations_jobs_cancel_request.py +""" + + +def main(): + client = ConversationAnalysis( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + client.begin_cancel_job( + job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", + ).result() + + +# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationsJobsCancelRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py new file mode 100644 index 000000000000..2af8caaf96fb --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import os +import pytest +from dotenv import load_dotenv +from devtools_testutils import ( + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, + add_header_regex_sanitizer, +) + +load_dotenv() + + +# For security, please avoid record sensitive identity information in recordings +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + conversationanalysis_subscription_id = os.environ.get( + "CONVERSATIONANALYSIS_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000" + ) + conversationanalysis_tenant_id = os.environ.get( + "CONVERSATIONANALYSIS_TENANT_ID", "00000000-0000-0000-0000-000000000000" + ) + conversationanalysis_client_id = os.environ.get( + "CONVERSATIONANALYSIS_CLIENT_ID", "00000000-0000-0000-0000-000000000000" + ) + conversationanalysis_client_secret = os.environ.get( + "CONVERSATIONANALYSIS_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=conversationanalysis_subscription_id, value="00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer(regex=conversationanalysis_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=conversationanalysis_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=conversationanalysis_client_secret, value="00000000-0000-0000-0000-000000000000") + + add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") + add_header_regex_sanitizer(key="Cookie", value="cookie;") + add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py new file mode 100644 index 000000000000..187320005502 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import ConversationAnalysisPreparer, ConversationAnalysisTestBase + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestConversationAnalysis(ConversationAnalysisTestBase): + @ConversationAnalysisPreparer() + @recorded_by_proxy + def test_analyze_conversation(self, conversationanalysis_endpoint): + client = self.create_client(endpoint=conversationanalysis_endpoint) + response = client.analyze_conversation( + body={ + "analysisInput": { + "conversationItem": { + "id": "str", + "participantId": "str", + "text": "str", + "language": "str", + "modality": "str", + "role": "str", + } + }, + "kind": "Conversation", + "parameters": { + "deploymentName": "str", + "projectName": "str", + "directTarget": "str", + "isLoggingEnabled": bool, + "stringIndexType": "str", + "targetProjectParameters": {"str": "analysis_config"}, + "verbose": bool, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @ConversationAnalysisPreparer() + @recorded_by_proxy + def test_begin_cancel_job(self, conversationanalysis_endpoint): + client = self.create_client(endpoint=conversationanalysis_endpoint) + response = client.begin_cancel_job( + job_id="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py new file mode 100644 index 000000000000..dc57f33515bd --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import ConversationAnalysisPreparer +from testpreparer_async import ConversationAnalysisTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestConversationAnalysisAsync(ConversationAnalysisTestBaseAsync): + @ConversationAnalysisPreparer() + @recorded_by_proxy_async + async def test_analyze_conversation(self, conversationanalysis_endpoint): + client = self.create_async_client(endpoint=conversationanalysis_endpoint) + response = await client.analyze_conversation( + body={ + "analysisInput": { + "conversationItem": { + "id": "str", + "participantId": "str", + "text": "str", + "language": "str", + "modality": "str", + "role": "str", + } + }, + "kind": "Conversation", + "parameters": { + "deploymentName": "str", + "projectName": "str", + "directTarget": "str", + "isLoggingEnabled": bool, + "stringIndexType": "str", + "targetProjectParameters": {"str": "analysis_config"}, + "verbose": bool, + }, + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @ConversationAnalysisPreparer() + @recorded_by_proxy_async + async def test_begin_cancel_job(self, conversationanalysis_endpoint): + client = self.create_async_client(endpoint=conversationanalysis_endpoint) + response = await ( + await client.begin_cancel_job( + job_id="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py new file mode 100644 index 000000000000..3c1ba7cefde7 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.ai.language.conversations import ConversationAnalysis +from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer +import functools + + +class ConversationAnalysisTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(ConversationAnalysis) + return self.create_client_from_credential( + ConversationAnalysis, + credential=credential, + endpoint=endpoint, + ) + + +ConversationAnalysisPreparer = functools.partial( + PowerShellPreparer, + "conversationanalysis", + conversationanalysis_endpoint="https://fake_conversationanalysis_endpoint.com", +) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py new file mode 100644 index 000000000000..d646a8a6be59 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from azure.ai.language.conversations.aio import ConversationAnalysis +from devtools_testutils import AzureRecordedTestCase + + +class ConversationAnalysisTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(ConversationAnalysis, is_async=True) + return self.create_client_from_credential( + ConversationAnalysis, + credential=credential, + endpoint=endpoint, + ) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml b/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml index ead988c6ed27..af4964e66aeb 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml @@ -1,18 +1,14 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - [build-system] -requires = ["setuptools>=77.0.3", "wheel"] +requires = [ + "setuptools>=77.0.3", + "wheel", +] build-backend = "setuptools.build_meta" [project] name = "azure-ai-language-conversations" authors = [ - { name = "Microsoft Corporation", email = "azpysdkhelp@microsoft.com" }, + { name = "Microsoft Corporation", email = "azpysdkhelp@microsoft.com" }, ] description = "Microsoft Corporation Azure Ai Language Conversations Client Library for Python" license = "MIT" @@ -28,28 +24,39 @@ classifiers = [ "Programming Language :: Python :: 3.13", ] requires-python = ">=3.9" -keywords = ["azure", "azure sdk"] - +keywords = [ + "azure", + "azure sdk", +] dependencies = [ "isodate>=0.6.1", "azure-core>=1.35.0", "typing-extensions>=4.6.0", ] dynamic = [ -"version", "readme" + "version", + "readme", ] [project.urls] repository = "https://github.com/Azure/azure-sdk-for-python" -[tool.setuptools.dynamic] -version = {attr = "azure.ai.language.conversations._version.VERSION"} -readme = {file = ["README.md", "CHANGELOG.md"], content-type = "text/markdown"} +[tool.setuptools.dynamic.version] +attr = "azure.ai.language.conversations._version.VERSION" + +[tool.setuptools.dynamic.readme] +file = [ + "README.md", + "CHANGELOG.md", +] +content-type = "text/markdown" [tool.setuptools.packages.find] exclude = [ "tests*", + "generated_tests*", "samples*", + "generated_samples*", "doc*", "azure", "azure.ai", @@ -57,4 +64,9 @@ exclude = [ ] [tool.setuptools.package-data] -pytyped = ["py.typed"] +pytyped = [ + "py.typed", +] + +[packaging] +auto_update = false diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_multi_turn_prediction_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_multi_turn_prediction_async.py index b2fc57698c05..3e00228b12ce 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_multi_turn_prediction_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_multi_turn_prediction_async.py @@ -107,38 +107,38 @@ async def sample_conversation_multi_turn_prediction_async(): # Intents print("Intents:") for 
intent in conversation.intents or []: - print(f" Name: {intent.name}") - print(f" Type: {intent.type}") - - print(" Conversation Item Ranges:") - for rng in intent.conversation_item_ranges or []: - print(f" - Offset: {rng.offset}, Count: {rng.count}") - - print("\n Entities (Scoped to Intent):") - for ent in intent.entities or []: - print(f" Name: {ent.name}") - print(f" Text: {ent.text}") - print(f" Confidence: {ent.confidence_score}") - print(f" Offset: {ent.offset}, Length: {ent.length}") - print( - f" Conversation Item ID: {ent.conversation_item_id}, " - f"Index: {ent.conversation_item_index}" - ) - - # Date/time resolutions - for res in ent.resolutions or []: - if isinstance(res, DateTimeResolution): - print( - f" - [DateTimeResolution] SubKind: {res.date_time_sub_kind}, " - f"Timex: {res.timex}, Value: {res.value}" - ) - - # Extra information (entity subtype + tags) - for extra in ent.extra_information or []: - if isinstance(extra, EntitySubtype): - print(f" - [EntitySubtype] Value: {extra.value}") - for tag in extra.tags or []: - print(f" • Tag: {tag.name}, Confidence: {tag.confidence_score}") + print(f" Name: {intent.name}") + print(f" Type: {intent.type}") + + print(" Conversation Item Ranges:") + for rng in intent.conversation_item_ranges or []: + print(f" - Offset: {rng.offset}, Count: {rng.count}") + + print("\n Entities (Scoped to Intent):") + for ent in intent.entities or []: + print(f" Name: {ent.name}") + print(f" Text: {ent.text}") + print(f" Confidence: {ent.confidence_score}") + print(f" Offset: {ent.offset}, Length: {ent.length}") + print( + f" Conversation Item ID: {ent.conversation_item_id}, " + f"Index: {ent.conversation_item_index}" + ) + + # Date/time resolutions + for res in ent.resolutions or []: + if isinstance(res, DateTimeResolution): + print( + f" - [DateTimeResolution] SubKind: {res.date_time_sub_kind}, " + f"Timex: {res.timex}, Value: {res.value}" + ) + + # Extra information (entity subtype + tags) + for extra in ent.extra_information or []: + if isinstance(extra, EntitySubtype): + print(f" - [EntitySubtype] Value: {extra.value}") + for tag in extra.tags or []: + print(f" • Tag: {tag.name}, Confidence: {tag.confidence_score}") print() # Global entities @@ -149,8 +149,7 @@ async def sample_conversation_multi_turn_prediction_async(): print(f" Confidence: {ent.confidence_score}") print(f" Offset: {ent.offset}, Length: {ent.length}") print( - f" Conversation Item ID: {ent.conversation_item_id}, " - f"Index: {ent.conversation_item_index}" + f" Conversation Item ID: {ent.conversation_item_id}, " f"Index: {ent.conversation_item_index}" ) for extra in ent.extra_information or []: @@ -161,11 +160,15 @@ async def sample_conversation_multi_turn_prediction_async(): print("-" * 40) else: print("No Conversational AI result returned.") + + # [END conversation_multi_turn_prediction_async] + async def main(): await sample_conversation_multi_turn_prediction_async() + if __name__ == "__main__": loop = asyncio.get_event_loop() loop.run_until_complete(main()) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_async.py index 13fd275fa4e3..5c06268a4239 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_async.py @@ -159,12 +159,15 @@ async def 
sample_conversation_pii_async(): print("\nErrors:") for err in d["errors"]: print(f" Code: {err.code} - {err.message}") + + # [END conversation_pii_async] async def main(): await sample_conversation_pii_async() + if __name__ == "__main__": loop = asyncio.get_event_loop() loop.run_until_complete(main()) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_character_mask_policy_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_character_mask_policy_async.py index 9c7fa9b442a1..a0c16bb330fa 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_character_mask_policy_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_character_mask_policy_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. @@ -136,8 +137,11 @@ async def sample_conversation_pii_with_character_mask_policy_async(): if "*" in redacted_text: redacted_verified.append(redacted_text) + + # [END conversation_pii_with_character_mask_policy_async] + async def main(): await sample_conversation_pii_with_character_mask_policy_async() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_entity_mask_policy_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_entity_mask_policy_async.py index 089ef66afc4b..0ca73229c488 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_entity_mask_policy_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_entity_mask_policy_async.py @@ -137,9 +137,7 @@ async def sample_conversation_pii_with_entity_mask_policy_async(): all_ok = False # 2) mask should appear like [Person] or [Person-1] expected_mask_pattern = rf"\[{re.escape(entity.category)}-?\d*\]" - if not re.search( - expected_mask_pattern, redacted_text, flags=re.IGNORECASE - ): + if not re.search(expected_mask_pattern, redacted_text, flags=re.IGNORECASE): print( f"WARNING: Expected entity mask similar to " f"'[{entity.category}]' but got: {redacted_text}" @@ -147,6 +145,8 @@ async def sample_conversation_pii_with_entity_mask_policy_async(): all_ok = False if all_ok: redacted_verified.append(redacted_text) + + # [END conversation_pii_with_entity_mask_policy_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_no_mask_policy_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_no_mask_policy_async.py index 78fab72b3e9c..20c20060fa0f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_no_mask_policy_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_pii_with_no_mask_policy_async.py @@ -67,10 +67,16 @@ async def sample_conversation_pii_with_no_mask_policy_async(): language="en", conversation_items=[ TextConversationItem( - id="1", participant_id="Agent_1", role=ParticipantRole.AGENT, text="Can you provide your name?" 
+ id="1", + participant_id="Agent_1", + role=ParticipantRole.AGENT, + text="Can you provide your name?", ), TextConversationItem( - id="2", participant_id="Customer_1", role=ParticipantRole.CUSTOMER, text="Hi, my name is John Doe." + id="2", + participant_id="Customer_1", + role=ParticipantRole.CUSTOMER, + text="Hi, my name is John Doe.", ), TextConversationItem( id="3", @@ -126,9 +132,9 @@ async def sample_conversation_pii_with_no_mask_policy_async(): ent_text = entity.text or "" detected_entities.append(ent_text) if ent_text not in returned_text: - print( - f"WARNING: Expected entity '{ent_text}' in returned text but not found." - ) + print(f"WARNING: Expected entity '{ent_text}' in returned text but not found.") + + # [END conversation_pii_with_no_mask_policy_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_async.py index 272d528b782b..a06eef81fa8a 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_async.py @@ -105,6 +105,8 @@ async def sample_conversation_prediction_async(): print() else: print("Unexpected result type from analyze_conversation.") + + # [END conversation_prediction_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_language_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_language_async.py index a0191edc092e..d138299b0674 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_language_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_language_async.py @@ -107,6 +107,8 @@ async def sample_conversation_prediction_with_language_async(): print() else: print("Unexpected result type from analyze_conversation.") + + # [END conversation_prediction_with_language_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_options_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_options_async.py index 3c2d608883e6..bf4a05ec1f0b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_options_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_prediction_with_options_async.py @@ -106,6 +106,8 @@ async def sample_conversation_prediction_with_options_async(): print() else: print("Unexpected result type from analyze_conversation.") + + # [END conversation_prediction_with_options_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_summarization_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_summarization_async.py index 7da9ce370319..caeb51d1f659 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_summarization_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conversation_summarization_async.py @@ -141,6 +141,8 @@ async def 
sample_conversation_summarization_async(): for error in d["errors"]: if isinstance(error, ConversationError): print(f" Code: {error.code} - {error.message}") + + # [END conversation_summarization_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_orchestration_prediction_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_orchestration_prediction_async.py index d33bc4cd6235..8eec8d969ef3 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_orchestration_prediction_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_orchestration_prediction_async.py @@ -102,6 +102,8 @@ async def sample_orchestration_prediction_async(): print("Prediction was not an OrchestrationPrediction.") else: print("Unexpected result type from analyze_conversation.") + + # [END orchestration_prediction_async] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py index a87132a11b7e..1c494e8f03ba 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py @@ -56,6 +56,7 @@ def sample_authentication_with_azure_active_directory(): clu_client = ConversationAnalysisClient(endpoint, credential=credential) + def main(): sample_authentication_api_key() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_multi_turn_prediction.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_multi_turn_prediction.py index ff49ab7ee62c..d0ddfeea9404 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_multi_turn_prediction.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_multi_turn_prediction.py @@ -142,8 +142,7 @@ def sample_conversation_multi_turn_prediction(): print(f" Confidence: {ent.confidence_score}") print(f" Offset: {ent.offset}, Length: {ent.length}") print( - f" Conversation Item ID: {ent.conversation_item_id}, " - f"Index: {ent.conversation_item_index}" + f" Conversation Item ID: {ent.conversation_item_id}, " f"Index: {ent.conversation_item_index}" ) for extra in ent.extra_information or []: @@ -154,6 +153,8 @@ def sample_conversation_multi_turn_prediction(): print("-" * 40) else: print("No Conversational AI result returned.") + + # [END conversation_multi_turn_prediction] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii.py index 71be403eca9d..0a2ee2448ecd 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii.py @@ -159,6 +159,8 @@ def sample_conversation_pii(): print("\nErrors:") for err in d["errors"]: print(f" Code: {err.code} - {err.message}") + + # [END conversation_pii] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_character_mask_policy.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_character_mask_policy.py index 900fbde74b7b..174bf4d8ad63 100644 --- 
a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_character_mask_policy.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_character_mask_policy.py @@ -136,6 +136,8 @@ def sample_conversation_pii_with_character_mask_policy(): if "*" in redacted_text: redacted_verified.append(redacted_text) + + # [END conversation_pii_with_character_mask_policy] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_entity_mask_policy.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_entity_mask_policy.py index a48fe3fe850e..683b4bfe4323 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_entity_mask_policy.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_entity_mask_policy.py @@ -145,6 +145,8 @@ def sample_conversation_pii_with_entity_mask_policy(): all_ok = False if all_ok: redacted_verified.append(redacted_text) + + # [END conversation_pii_with_entity_mask_policy] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_no_mask_policy.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_no_mask_policy.py index 6704c7b56f8e..9f8a94c6bad2 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_no_mask_policy.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_pii_with_no_mask_policy.py @@ -130,9 +130,9 @@ def sample_conversation_pii_with_no_mask_policy(): ent_text = entity.text or "" detected_entities.append(ent_text) if ent_text not in returned_text: - print( - f"WARNING: Expected entity '{ent_text}' in returned text but not found." 
- ) + print(f"WARNING: Expected entity '{ent_text}' in returned text but not found.") + + # [END conversation_pii_with_no_mask_policy] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction.py index e4803316242b..b54ed43434f1 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction.py @@ -104,6 +104,8 @@ def sample_conversation_prediction(): print() else: print("Unexpected result type from analyze_conversation.") + + # [END conversation_prediction] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_language.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_language.py index eb1b48a14b01..b43e27874c44 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_language.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_language.py @@ -106,6 +106,8 @@ def sample_conversation_prediction_with_language(): print() else: print("Unexpected result type from analyze_conversation.") + + # [END conversation_prediction_with_language] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_options.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_options.py index c032d74bc9e3..ddb054b6f92f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_options.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_prediction_with_options.py @@ -105,6 +105,8 @@ def sample_conversation_prediction_with_options(): print() else: print("Unexpected result type from analyze_conversation.") + + # [END conversation_prediction_with_options] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_summarization.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_summarization.py index 6658af4cefad..09edb2215257 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_summarization.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_conversation_summarization.py @@ -141,6 +141,8 @@ def sample_conversation_summarization(): for error in d["errors"]: if isinstance(error, ConversationError): print(f" Code: {error.code} - {error.message}") + + # [END conversation_summarization] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_orchestration_prediction.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_orchestration_prediction.py index cf4bfaaf7a57..e61ec24caf58 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_orchestration_prediction.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_orchestration_prediction.py @@ -101,6 +101,8 @@ def sample_orchestration_prediction(): print("Prediction was not an OrchestrationPrediction.") else: print("Unexpected result type from analyze_conversation.") + + # [END orchestration_prediction] diff --git 
a/sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml b/sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml deleted file mode 100644 index 901bc8ccbfa6..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml +++ /dev/null @@ -1,2 +0,0 @@ -[packaging] -auto_update = false diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml index 30bf644fdc5f..92c4490aa759 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml @@ -1,4 +1,4 @@ -directory: specification/cognitiveservices/Language.Conversations -commit: a87f945a3b9ce9b62a6ec5d8a4e9f2a1f7efff52 +directory: specification/cognitiveservices/data-plane/LanguageAnalyzeConversations +commit: 5b0855a6739c8dbd68136d364d7fc5ed1aa0c43f repo: Azure/azure-rest-api-specs additionalDirectories: From 082bf5f467a9e3f71673a41b1f30e1713d9118f0 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Wed, 5 Nov 2025 22:22:54 -0800 Subject: [PATCH 02/10] removed generated samples and tests --- ...ccessful_analyze_conversational_ai_task.py | 68 ------------------- .../successful_analyze_conversations.py | 43 ------------ ...ssful_analyze_conversations_arbitration.py | 51 -------------- ...conversations_arbitration_direct_target.py | 51 -------------- ...alyze_conversations_jobs_cancel_request.py | 32 --------- .../generated_tests/conftest.py | 45 ------------ .../test_conversation_analysis.py | 56 --------------- .../test_conversation_analysis_async.py | 59 ---------------- .../generated_tests/testpreparer.py | 28 -------- .../generated_tests/testpreparer_async.py | 20 ------ 10 files changed, 453 deletions(-) delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py deleted file mode 100644 index da3831d99f2e..000000000000 --- 
a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversational_ai_task.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.conversations import ConversationAnalysis - -""" -# PREREQUISITES - pip install azure-ai-language-conversations -# USAGE - python successful_analyze_conversational_ai_task.py -""" - - -def main(): - client = ConversationAnalysis( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_conversation( - body={ - "analysisInput": { - "conversations": [ - { - "conversationItems": [ - {"id": "1", "participantId": "user", "text": "Hi!"}, - {"id": "2", "participantId": "bot", "text": "Hello, how can I help you?"}, - { - "id": "3", - "participantId": "user", - "text": "I would like to book a flight from Paris to Berlin on Oct 10th.", - }, - {"id": "4", "participantId": "bot", "text": "Do you have any airline preference?"}, - {"id": "5", "participantId": "user", "text": "No."}, - {"id": "6", "participantId": "user", "text": "I like New York and Boston."}, - { - "id": "7", - "participantId": "user", - "text": "Actualy, change the destination to New York.", - }, - {"id": "8", "participantId": "user", "text": "Wait, I do not like the food in New York."}, - {"id": "9", "participantId": "user", "text": "Ok, change the destination back to Berlin."}, - ], - "id": "order", - "language": "en", - "modality": "text", - } - ] - }, - "kind": "ConversationalAI", - "parameters": { - "deploymentName": "deployment1", - "projectName": "project1", - "stringIndexType": "Utf16CodeUnit", - }, - }, - ) - print(response) - - -# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationalAITask.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py deleted file mode 100644 index ccd626af6854..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.conversations import ConversationAnalysis - -""" -# PREREQUISITES - pip install azure-ai-language-conversations -# USAGE - python successful_analyze_conversations.py -""" - - -def main(): - client = ConversationAnalysis( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_conversation( - body={ - "analysisInput": { - "conversationItem": {"id": "1", "participantId": "1", "text": "Book a flight to Seattle on Oct 10th"} - }, - "kind": "Conversation", - "parameters": { - "deploymentName": "{deployment-name}", - "projectName": "{project-name}", - "stringIndexType": "TextElement_V8", - }, - }, - ) - print(response) - - -# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversations.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py deleted file mode 100644 index bdfe88e0653f..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.conversations import ConversationAnalysis - -""" -# PREREQUISITES - pip install azure-ai-language-conversations -# USAGE - python successful_analyze_conversations_arbitration.py -""" - - -def main(): - client = ConversationAnalysis( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_conversation( - body={ - "analysisInput": { - "conversationItem": { - "id": "1", - "language": "en-GB", - "modality": "text", - "participantId": "1", - "text": "How do I integrate QnA Maker and LUIS?", - } - }, - "kind": "Conversation", - "parameters": { - "deploymentName": "{deployment-name}", - "isLoggingEnabled": False, - "projectName": "{project-name}", - "stringIndexType": "TextElement_V8", - "verbose": True, - }, - }, - ) - print(response) - - -# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationsArbitration.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py deleted file mode 100644 index 030bca19568e..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_arbitration_direct_target.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.conversations import ConversationAnalysis - -""" -# PREREQUISITES - pip install azure-ai-language-conversations -# USAGE - python successful_analyze_conversations_arbitration_direct_target.py -""" - - -def main(): - client = ConversationAnalysis( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_conversation( - body={ - "analysisInput": {"conversationItem": {"id": "1", "participantId": "1", "text": "Ports and connectors"}}, - "kind": "Conversation", - "parameters": { - "deploymentName": "dep1", - "directTarget": "qnaProject", - "projectName": "prj1", - "targetProjectParameters": { - "qnaProject": { - "callingOptions": { - "context": {"previousQnaId": 4, "previousUserQuery": "Meet Surface Pro 4"}, - "question": "App Service overview", - "top": 1, - }, - "targetProjectKind": "QuestionAnswering", - } - }, - }, - }, - ) - print(response) - - -# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationsArbitrationDirectTarget.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py deleted file mode 100644 index 5555747fd6ab..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_samples/successful_analyze_conversations_jobs_cancel_request.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.conversations import ConversationAnalysis - -""" -# PREREQUISITES - pip install azure-ai-language-conversations -# USAGE - python successful_analyze_conversations_jobs_cancel_request.py -""" - - -def main(): - client = ConversationAnalysis( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - client.begin_cancel_job( - job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", - ).result() - - -# x-ms-original-file: 2025-11-15-preview/SuccessfulAnalyzeConversationsJobsCancelRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py deleted file mode 100644 index 2af8caaf96fb..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/conftest.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import os -import pytest -from dotenv import load_dotenv -from devtools_testutils import ( - test_proxy, - add_general_regex_sanitizer, - add_body_key_sanitizer, - add_header_regex_sanitizer, -) - -load_dotenv() - - -# For security, please avoid record sensitive identity information in recordings -@pytest.fixture(scope="session", autouse=True) -def add_sanitizers(test_proxy): - conversationanalysis_subscription_id = os.environ.get( - "CONVERSATIONANALYSIS_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000" - ) - conversationanalysis_tenant_id = os.environ.get( - "CONVERSATIONANALYSIS_TENANT_ID", "00000000-0000-0000-0000-000000000000" - ) - conversationanalysis_client_id = os.environ.get( - "CONVERSATIONANALYSIS_CLIENT_ID", "00000000-0000-0000-0000-000000000000" - ) - conversationanalysis_client_secret = os.environ.get( - "CONVERSATIONANALYSIS_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000" - ) - add_general_regex_sanitizer( - regex=conversationanalysis_subscription_id, value="00000000-0000-0000-0000-000000000000" - ) - add_general_regex_sanitizer(regex=conversationanalysis_tenant_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=conversationanalysis_client_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=conversationanalysis_client_secret, value="00000000-0000-0000-0000-000000000000") - - add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") - add_header_regex_sanitizer(key="Cookie", value="cookie;") - add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py deleted file mode 100644 index 187320005502..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import ConversationAnalysisPreparer, ConversationAnalysisTestBase - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestConversationAnalysis(ConversationAnalysisTestBase): - @ConversationAnalysisPreparer() - @recorded_by_proxy - def test_analyze_conversation(self, conversationanalysis_endpoint): - client = self.create_client(endpoint=conversationanalysis_endpoint) - response = client.analyze_conversation( - body={ - "analysisInput": { - "conversationItem": { - "id": "str", - "participantId": "str", - "text": "str", - "language": "str", - "modality": "str", - "role": "str", - } - }, - "kind": "Conversation", - "parameters": { - "deploymentName": "str", - "projectName": "str", - "directTarget": "str", - "isLoggingEnabled": bool, - "stringIndexType": "str", - "targetProjectParameters": {"str": "analysis_config"}, - "verbose": bool, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @ConversationAnalysisPreparer() - @recorded_by_proxy - def test_begin_cancel_job(self, conversationanalysis_endpoint): - client = self.create_client(endpoint=conversationanalysis_endpoint) - response = client.begin_cancel_job( - job_id="str", - ).result() # call '.result()' to poll until service return final result - - # please add some check logic here by yourself - # ... diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py deleted file mode 100644 index dc57f33515bd..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/test_conversation_analysis_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import ConversationAnalysisPreparer -from testpreparer_async import ConversationAnalysisTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestConversationAnalysisAsync(ConversationAnalysisTestBaseAsync): - @ConversationAnalysisPreparer() - @recorded_by_proxy_async - async def test_analyze_conversation(self, conversationanalysis_endpoint): - client = self.create_async_client(endpoint=conversationanalysis_endpoint) - response = await client.analyze_conversation( - body={ - "analysisInput": { - "conversationItem": { - "id": "str", - "participantId": "str", - "text": "str", - "language": "str", - "modality": "str", - "role": "str", - } - }, - "kind": "Conversation", - "parameters": { - "deploymentName": "str", - "projectName": "str", - "directTarget": "str", - "isLoggingEnabled": bool, - "stringIndexType": "str", - "targetProjectParameters": {"str": "analysis_config"}, - "verbose": bool, - }, - }, - ) - - # please add some check logic here by yourself - # ... 
- - @ConversationAnalysisPreparer() - @recorded_by_proxy_async - async def test_begin_cancel_job(self, conversationanalysis_endpoint): - client = self.create_async_client(endpoint=conversationanalysis_endpoint) - response = await ( - await client.begin_cancel_job( - job_id="str", - ) - ).result() # call '.result()' to poll until service return final result - - # please add some check logic here by yourself - # ... diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py deleted file mode 100644 index 3c1ba7cefde7..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from azure.ai.language.conversations import ConversationAnalysis -from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer -import functools - - -class ConversationAnalysisTestBase(AzureRecordedTestCase): - - def create_client(self, endpoint): - credential = self.get_credential(ConversationAnalysis) - return self.create_client_from_credential( - ConversationAnalysis, - credential=credential, - endpoint=endpoint, - ) - - -ConversationAnalysisPreparer = functools.partial( - PowerShellPreparer, - "conversationanalysis", - conversationanalysis_endpoint="https://fake_conversationanalysis_endpoint.com", -) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py deleted file mode 100644 index d646a8a6be59..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/generated_tests/testpreparer_async.py +++ /dev/null @@ -1,20 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from azure.ai.language.conversations.aio import ConversationAnalysis -from devtools_testutils import AzureRecordedTestCase - - -class ConversationAnalysisTestBaseAsync(AzureRecordedTestCase): - - def create_async_client(self, endpoint): - credential = self.get_credential(ConversationAnalysis, is_async=True) - return self.create_client_from_credential( - ConversationAnalysis, - credential=credential, - endpoint=endpoint, - ) From b0063a3aec04bdc147579bd8ca2245b7c33035b6 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Wed, 5 Nov 2025 22:29:56 -0800 Subject: [PATCH 03/10] updated version to 2.0.0b2 --- .../azure/ai/language/conversations/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py index be71c81bd282..8eb37199ee54 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "2.0.0b2" From a2e6ac5c90baa73f091af8cfb107a282b3b7464f Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Wed, 5 Nov 2025 23:52:37 -0800 Subject: [PATCH 04/10] updated changelog --- .../azure-ai-language-conversations/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md b/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md index a304baa3227b..6deeaeb2287c 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md @@ -1,5 +1,9 @@ # Release History +## 2.0.0b2 (2025-11-08) + +### Features Added + ## 2.0.0b1 (2025-08-22) ### Features Added From 5b01951a3cee17be1786fd517403e9d10b502740 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 6 Nov 2025 09:51:43 -0800 Subject: [PATCH 05/10] updated name ConversationAnalysisClient --- .../_metadata.json | 6 +-- .../apiview-properties.json | 8 ++-- .../ai/language/conversations/__init__.py | 4 +- .../ai/language/conversations/_client.py | 8 ++-- .../language/conversations/_configuration.py | 4 +- .../conversations/_operations/__init__.py | 2 +- .../conversations/_operations/_operations.py | 6 +-- .../ai/language/conversations/aio/__init__.py | 4 +- .../ai/language/conversations/aio/_client.py | 8 ++-- .../conversations/aio/_configuration.py | 4 +- .../conversations/aio/_operations/__init__.py | 2 +- .../aio/_operations/_operations.py | 6 +-- .../pyproject.toml | 39 ++++++++----------- .../tsp-location.yaml | 2 +- 14 files changed, 46 insertions(+), 57 deletions(-) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json b/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json index 48f671f46df4..509044270b48 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/_metadata.json @@ -1,7 +1,3 @@ { - "apiVersion": 
"2025-11-15-preview", - "commit": "5b0855a6739c8dbd68136d364d7fc5ed1aa0c43f", - "repository_url": "https://github.com/Azure/azure-rest-api-specs", - "typespec_src": "specification/cognitiveservices/data-plane/LanguageAnalyzeConversations", - "emitterVersion": "0.53.1" + "apiVersion": "2025-11-15-preview" } \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json b/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json index 769cab9f7a56..3bcabbe0c6ab 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/apiview-properties.json @@ -126,9 +126,9 @@ "azure.ai.language.conversations.models.AnalyzeConversationOperationResultsKind": "Language.Conversations.AnalyzeConversationResultsKind", "azure.ai.language.conversations.models.SummaryLengthBucket": "Language.Conversations.SummaryLengthBucket", "azure.ai.language.conversations.models.SummaryAspect": "Language.Conversations.SummaryAspect", - "azure.ai.language.conversations.ConversationAnalysis.analyze_conversation": "Language.Conversations.analyzeConversations", - "azure.ai.language.conversations.aio.ConversationAnalysis.analyze_conversation": "Language.Conversations.analyzeConversations", - "azure.ai.language.conversations.ConversationAnalysis.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob", - "azure.ai.language.conversations.aio.ConversationAnalysis.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob" + "azure.ai.language.conversations.ConversationAnalysisClient.analyze_conversation": "Language.Conversations.analyzeConversations", + "azure.ai.language.conversations.aio.ConversationAnalysisClient.analyze_conversation": "Language.Conversations.analyzeConversations", + "azure.ai.language.conversations.ConversationAnalysisClient.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob", + "azure.ai.language.conversations.aio.ConversationAnalysisClient.begin_cancel_job": "Language.Conversations.analyzeConversationsCancelJob" } } \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py index dc13f060daf4..93ba412cb15c 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._client import ConversationAnalysis # type: ignore +from ._client import ConversationAnalysisClient # type: ignore from ._version import VERSION __version__ = VERSION @@ -25,7 +25,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ConversationAnalysis", + "ConversationAnalysisClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py index c337d532768f..972c7b06b7a7 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py +++ 
b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_client.py @@ -15,15 +15,15 @@ from azure.core.pipeline import policies from azure.core.rest import HttpRequest, HttpResponse -from ._configuration import ConversationAnalysisConfiguration -from ._operations import _ConversationAnalysisOperationsMixin +from ._configuration import ConversationAnalysisClientConfiguration +from ._operations import _ConversationAnalysisClientOperationsMixin from ._utils.serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential -class ConversationAnalysis(_ConversationAnalysisOperationsMixin): +class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): """The language service conversations API is a suite of natural language processing (NLP) skills that can be used to analyze structured conversations (textual or spoken). The synchronous API in this suite accepts a request and mediates among multiple language projects, such as LUIS @@ -50,7 +50,7 @@ class ConversationAnalysis(_ConversationAnalysisOperationsMixin): def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: _endpoint = "{Endpoint}/language" - self._config = ConversationAnalysisConfiguration(endpoint=endpoint, credential=credential, **kwargs) + self._config = ConversationAnalysisClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py index 0191d8cc47f5..798643ca7242 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py @@ -17,8 +17,8 @@ from azure.core.credentials import TokenCredential -class ConversationAnalysisConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for ConversationAnalysis. +class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for ConversationAnalysisClient. Note that all parameters used to create this instance are saved as instance attributes. 
diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py index 4f55a6b05b89..90c37e0a0e5b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import _ConversationAnalysisOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _ConversationAnalysisClientOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py index 29daeb3f6faf..b3de4bc23998 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py @@ -29,7 +29,7 @@ from azure.core.utils import case_insensitive_dict from .. import models as _models -from .._configuration import ConversationAnalysisConfiguration +from .._configuration import ConversationAnalysisClientConfiguration from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from .._utils.serialization import Serializer from .._utils.utils import ClientMixinABC @@ -136,8 +136,8 @@ def build_conversation_analysis_cancel_job_request( # pylint: disable=name-too- return HttpRequest(method="POST", url=_url, params=_params, **kwargs) -class _ConversationAnalysisOperationsMixin( - ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], ConversationAnalysisConfiguration] +class _ConversationAnalysisClientOperationsMixin( + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], ConversationAnalysisClientConfiguration] ): @overload diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py index 732438501d11..0276ad109495 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._client import ConversationAnalysis # type: ignore +from ._client import ConversationAnalysisClient # type: ignore try: from ._patch import __all__ as _patch_all @@ -22,7 +22,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ConversationAnalysis", + "ConversationAnalysisClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py index 8b5407baa17c..7980d98238ac 100644 --- 
a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_client.py @@ -16,14 +16,14 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._utils.serialization import Deserializer, Serializer -from ._configuration import ConversationAnalysisConfiguration -from ._operations import _ConversationAnalysisOperationsMixin +from ._configuration import ConversationAnalysisClientConfiguration +from ._operations import _ConversationAnalysisClientOperationsMixin if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential -class ConversationAnalysis(_ConversationAnalysisOperationsMixin): +class ConversationAnalysisClient(_ConversationAnalysisClientOperationsMixin): """The language service conversations API is a suite of natural language processing (NLP) skills that can be used to analyze structured conversations (textual or spoken). The synchronous API in this suite accepts a request and mediates among multiple language projects, such as LUIS @@ -52,7 +52,7 @@ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: _endpoint = "{Endpoint}/language" - self._config = ConversationAnalysisConfiguration(endpoint=endpoint, credential=credential, **kwargs) + self._config = ConversationAnalysisClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py index cea38d85d71e..39c4ff96b910 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py @@ -17,8 +17,8 @@ from azure.core.credentials_async import AsyncTokenCredential -class ConversationAnalysisConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for ConversationAnalysis. +class ConversationAnalysisClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for ConversationAnalysisClient. Note that all parameters used to create this instance are saved as instance attributes. 
diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py index 4f55a6b05b89..90c37e0a0e5b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import _ConversationAnalysisOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _ConversationAnalysisClientOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py index d2456af5ae41..9f1d3b7cbaf7 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py @@ -39,15 +39,15 @@ from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._utils.utils import ClientMixinABC from ..._validation import api_version_validation -from .._configuration import ConversationAnalysisConfiguration +from .._configuration import ConversationAnalysisClientConfiguration JSON = MutableMapping[str, Any] T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] -class _ConversationAnalysisOperationsMixin( - ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], ConversationAnalysisConfiguration] +class _ConversationAnalysisClientOperationsMixin( + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], ConversationAnalysisClientConfiguration] ): @overload diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml b/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml index af4964e66aeb..7ec2b439e6be 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/pyproject.toml @@ -1,14 +1,18 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + [build-system] -requires = [ - "setuptools>=77.0.3", - "wheel", -] +requires = ["setuptools>=77.0.3", "wheel"] build-backend = "setuptools.build_meta" [project] name = "azure-ai-language-conversations" authors = [ - { name = "Microsoft Corporation", email = "azpysdkhelp@microsoft.com" }, + { name = "Microsoft Corporation", email = "azpysdkhelp@microsoft.com" }, ] description = "Microsoft Corporation Azure Ai Language Conversations Client Library for Python" license = "MIT" @@ -24,32 +28,23 @@ classifiers = [ "Programming Language :: Python :: 3.13", ] requires-python = ">=3.9" -keywords = [ - "azure", - "azure sdk", -] +keywords = ["azure", "azure sdk"] + dependencies = [ "isodate>=0.6.1", "azure-core>=1.35.0", "typing-extensions>=4.6.0", ] dynamic = [ - "version", - "readme", +"version", "readme" ] [project.urls] repository = "https://github.com/Azure/azure-sdk-for-python" -[tool.setuptools.dynamic.version] -attr = "azure.ai.language.conversations._version.VERSION" - -[tool.setuptools.dynamic.readme] -file = [ - "README.md", - "CHANGELOG.md", -] -content-type = "text/markdown" +[tool.setuptools.dynamic] +version = {attr = "azure.ai.language.conversations._version.VERSION"} +readme = {file = ["README.md", "CHANGELOG.md"], content-type = "text/markdown"} [tool.setuptools.packages.find] exclude = [ @@ -64,9 +59,7 @@ exclude = [ ] [tool.setuptools.package-data] -pytyped = [ - "py.typed", -] +pytyped = ["py.typed"] [packaging] auto_update = false diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml index 92c4490aa759..2329eaff3b86 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/cognitiveservices/data-plane/LanguageAnalyzeConversations -commit: 5b0855a6739c8dbd68136d364d7fc5ed1aa0c43f +commit: 294191f181b429d1c138e109e7dc8de8e01fbc91 repo: Azure/azure-rest-api-specs additionalDirectories: From cb05d565c99c5a50be6f10ef3692d76a601c3254 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 6 Nov 2025 10:21:52 -0800 Subject: [PATCH 06/10] updated patch --- .../azure/ai/language/conversations/_patch.py | 300 ++++++++++++++++- .../ai/language/conversations/aio/_patch.py | 305 +++++++++++++++++- .../language/conversations/models/_patch.py | 31 +- .../tests/test_conversation_prediction.py | 4 - 4 files changed, 631 insertions(+), 9 deletions(-) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py index 87676c65a8f0..28d395e4c5dc 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -7,9 +8,303 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import json +from typing import Any, Callable, Dict, IO, Mapping, Optional, TypeVar, Union, cast, overload, Generic, TYPE_CHECKING +from collections.abc import MutableMapping # pylint:disable=import-error +from urllib.parse import urlparse +from azure.core.exceptions import ( + HttpResponseError, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.polling.base_polling import LROBasePolling +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.core.credentials import AzureKeyCredential +from azure.core.paging import ItemPaged +from ._client import ConversationAnalysisClient as AnalysisClientGenerated +from .models import AnalyzeConversationOperationInput, AnalyzeConversationOperationState, ConversationActions +from ._utils.serialization import Serializer +from ._validation import api_version_validation -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + +JSON = MutableMapping[str, Any] +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def _parse_operation_id(op_loc: Optional[str]) -> Optional[str]: + """Extract the operation ID from an Operation-Location URL. + + :param op_loc: The ``Operation-Location`` header value or URL to parse. + If ``None`` or malformed, no ID can be extracted. + :type op_loc: Optional[str] + :return: The trailing path segment as the operation ID, or ``None`` if not found. + :rtype: Optional[str] + """ + if not op_loc: + return None + path = urlparse(op_loc).path.rstrip("/") + if "/" not in path: + return None + return path.rsplit("/", 1)[-1] + + +PollingReturnType_co = TypeVar("PollingReturnType_co", covariant=True) + + +class AnalyzeConversationLROPoller(LROPoller[PollingReturnType_co], Generic[PollingReturnType_co]): + """Custom poller that returns PollingReturnType_co and exposes operation metadata.""" + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + # populated by the deserialization callback in your begin_* method + self._last_state: Optional["AnalyzeConversationOperationState"] = None + + def _record_state_for_details(self, state: "AnalyzeConversationOperationState") -> None: + """Internal: update the state used by ``.details``. + + :param state: The latest operation state to expose via ``details``. + :type state: AnalyzeConversationOperationState + :return: None + :rtype: None + """ + self._last_state = state + + @property + def details(self) -> Mapping[str, Any]: + """Metadata associated with the long-running operation. + + :return: A mapping with keys like ``operation_id`` and, when available, + ``status``, ``job_id``, ``display_name``, ``created_date_time``, + ``last_updated_date_time``, ``expiration_date_time``, ``statistics``, + ``errors``, and ``next_link``. 
+ :rtype: Mapping[str, Any] + """ + try: + headers = getattr(self.polling_method(), "_initial_response").http_response.headers # type: ignore[attr-defined] + op_loc = headers.get("Operation-Location") or headers.get("operation-location") + except (AttributeError, TypeError): + # missing attributes in the chain, or headers is not a mapping + op_loc = None + + op_id = _parse_operation_id(op_loc) + info: Dict[str, Any] = {"operation_id": op_id} + + # Merge fields from the final state (if available) + if self._last_state is not None: + s = self._last_state + info.update( + { + "status": s.status, + "job_id": s.job_id, + "display_name": s.display_name, + "created_date_time": s.created_date_time, + "last_updated_date_time": s.last_updated_date_time, + "expiration_date_time": s.expiration_date_time, + "statistics": s.statistics, + "errors": s.errors, + "next_link": s.next_link, + } + ) + return info + + @classmethod + def from_continuation_token( + cls, + polling_method: PollingMethod[PollingReturnType_co], + continuation_token: str, + **kwargs: Any, + ) -> "AnalyzeConversationLROPoller[PollingReturnType_co]": + client, initial_response, deserialization_callback = polling_method.from_continuation_token( + continuation_token, **kwargs + ) + return cls(client, initial_response, deserialization_callback, polling_method) + + +class ConversationAnalysisClient(AnalysisClientGenerated): + + def __init__( + self, + endpoint: str, + credential: Union[AzureKeyCredential, "TokenCredential"], + *, + api_version: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Create a ConversationAnalysisClient. + :param endpoint: Supported Cognitive Services endpoint. + :type endpoint: str + :param credential: Key or token credential. + :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str` + """ + if api_version is not None: + kwargs["api_version"] = api_version + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + @overload + def begin_analyze_conversation_job( + self, body: AnalyzeConversationOperationInput, *, content_type: str = "application/json", **kwargs: Any + ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ + ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_analyze_conversation_job( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ + ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_analyze_conversation_job( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ + ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2023-04-01", + params_added_on={"2023-04-01": ["api_version", "content_type", "accept"]}, + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + ) + def begin_analyze_conversation_job( # type: ignore[override] + self, body: Union[AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any + ) -> AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput or JSON or IO[bytes] + :return: A poller whose ``result()`` yields ``ItemPaged[ConversationActions]`` and exposes metadata via ``.details``. 
+ :rtype: ~azure.ai.language.conversations.AnalyzeConversationLROPoller[ + ~azure.core.paging.ItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + polling: Union[bool, PollingMethod[ItemPaged["ConversationActions"]]] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + cls = kwargs.pop("cls", None) # optional custom deserializer + kwargs.pop("error_map", None) + + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + def _fetch_state_by_next_link(next_link: str) -> AnalyzeConversationOperationState: + req = HttpRequest("GET", next_link) + resp = self._client.send_request(req) # type: ignore[attr-defined] + if resp.status_code != 200: + raise HttpResponseError(response=resp) + data = json.loads(resp.text()) + return AnalyzeConversationOperationState(data) + + def _build_pager_from_state(state: AnalyzeConversationOperationState) -> ItemPaged["ConversationActions"]: + def extract_data(s: AnalyzeConversationOperationState): + next_link = s.next_link + actions: ConversationActions = s.actions + return next_link, [actions] + + def get_next(token: Optional[str]) -> Optional[AnalyzeConversationOperationState]: + if token is None: + return state + if not token: + return None + return _fetch_state_by_next_link(token) + + return ItemPaged(get_next, extract_data) + + # ----- end paging helpers + + # filled after creating the poller; used inside the deserializer + poller_holder: Dict[str, AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]]] = {} + + def get_long_running_output(pipeline_response): + final_response = pipeline_response.http_response + if final_response.status_code == 200: + data = json.loads(final_response.text()) + op_state = AnalyzeConversationOperationState(data) + + # stash state on the custom poller for `.details` + poller_ref = poller_holder["poller"] + poller_ref._record_state_for_details(op_state) # pylint:disable=protected-access + + paged = _build_pager_from_state(op_state) + return cls(pipeline_response, paged, {}) if cls else paged + raise HttpResponseError(response=final_response) + + # ----- polling method selection + if polling is True: + polling_method: PollingMethod[ItemPaged["ConversationActions"]] = cast( + PollingMethod[ItemPaged["ConversationActions"]], + LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(PollingMethod[ItemPaged["ConversationActions"]], NoPolling()) + else: + polling_method = cast(PollingMethod[ItemPaged["ConversationActions"]], polling) + + if cont_token: + return AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + ) + + # Submit the job + raw_result = self._analyze_conversation_job_initial( + body=body, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + + lro: AnalyzeConversationLROPoller[ItemPaged["ConversationActions"]] = AnalyzeConversationLROPoller( + self._client, raw_result, get_long_running_output, polling_method + ) + 
poller_holder["poller"] = lro + return lro def patch_sdk(): @@ -19,3 +314,6 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ + + +__all__ = ["ConversationAnalysisClient", "AnalyzeConversationLROPoller"] \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py index 87676c65a8f0..55711160700e 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -7,9 +8,308 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import json +from typing import Any, Callable, Dict, IO, Mapping, Optional, TypeVar, Union, cast, overload, Generic, TYPE_CHECKING +from collections.abc import MutableMapping # pylint:disable=import-error +from urllib.parse import urlparse +from azure.core.exceptions import ( + HttpResponseError, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.polling.async_base_polling import AsyncLROBasePolling +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.core.credentials import AzureKeyCredential +from azure.core.async_paging import AsyncItemPaged +from ._client import ConversationAnalysisClient as AnalysisClientGenerated +from ..models import AnalyzeConversationOperationInput, AnalyzeConversationOperationState, ConversationActions +from .._validation import api_version_validation -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + +JSON = MutableMapping[str, Any] +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +def _parse_operation_id(op_loc: Optional[str]) -> Optional[str]: + """Extract the operation ID from an Operation-Location URL. + + :param op_loc: The ``Operation-Location`` header value or URL to parse. + If ``None`` or malformed, no ID can be extracted. + :type op_loc: Optional[str] + :return: The trailing path segment as the operation ID, or ``None`` if not found. 
+ :rtype: Optional[str] + """ + if not op_loc: + return None + path = urlparse(op_loc).path.rstrip("/") + if "/" not in path: + return None + return path.rsplit("/", 1)[-1] + + +PollingReturnType_co = TypeVar("PollingReturnType_co", covariant=True) + + +class AnalyzeConversationAsyncLROPoller(AsyncLROPoller[PollingReturnType_co], Generic[PollingReturnType_co]): + """Async poller that returns PollingReturnType_co and exposes operation metadata.""" + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + # populated by your deserialization callback in begin_*_async + self._last_state: Optional["AnalyzeConversationOperationState"] = None + + def _record_state_for_details(self, state: "AnalyzeConversationOperationState") -> None: + """Internal: update the state used by ``.details``. + + :param state: The latest operation state to expose via ``details``. + :type state: AnalyzeConversationOperationState + :return: None + :rtype: None + """ + self._last_state = state + + @property + def details(self) -> Mapping[str, Any]: + """Metadata associated with the long-running operation. + + :return: A mapping with keys like ``operation_id`` and, when available, + ``status``, ``job_id``, ``display_name``, ``created_date_time``, + ``last_updated_date_time``, ``expiration_date_time``, ``statistics``, + ``errors``, and ``next_link``. + :rtype: Mapping[str, Any] + """ + try: + headers = getattr(self.polling_method(), "_initial_response").http_response.headers # type: ignore[attr-defined] + op_loc = headers.get("Operation-Location") or headers.get("operation-location") + except (AttributeError, TypeError): + # missing attributes in the chain, or headers is not a mapping + op_loc = None + + info: Dict[str, Any] = {"operation_id": _parse_operation_id(op_loc)} + + # Enrich from final state if available + if self._last_state is not None: + s = self._last_state + info.update( + { + "status": s.status, + "job_id": s.job_id, + "display_name": s.display_name, + "created_date_time": s.created_date_time, + "last_updated_date_time": s.last_updated_date_time, + "expiration_date_time": s.expiration_date_time, + "statistics": s.statistics, + "errors": s.errors, + "next_link": s.next_link, + } + ) + return info + + @classmethod + def from_continuation_token( + cls, + polling_method: AsyncPollingMethod[PollingReturnType_co], + continuation_token: str, + **kwargs: Any, + ) -> "AnalyzeConversationAsyncLROPoller[PollingReturnType_co]": + client, initial_response, deserialization_callback = polling_method.from_continuation_token( + continuation_token, **kwargs + ) + return cls(client, initial_response, deserialization_callback, polling_method) + + +class ConversationAnalysisClient(AnalysisClientGenerated): + + def __init__( + self, + endpoint: str, + credential: Union[AzureKeyCredential, "AsyncTokenCredential"], + *, + api_version: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Create a ConversationAnalysisClient. + :param endpoint: Supported Cognitive Services endpoint. + :type endpoint: str + :param credential: Key or token credential. + :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str` + """ + if api_version is not None: + kwargs["api_version"] = api_version + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + @overload + async def begin_analyze_conversation_job( + self, body: AnalyzeConversationOperationInput, *, content_type: str = "application/json", **kwargs: Any + ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ + ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_analyze_conversation_job( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ + ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_analyze_conversation_job( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. + + :param body: The input for the analyze conversations operation. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ + ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2023-04-01", + params_added_on={"2023-04-01": ["api_version", "content_type", "accept"]}, + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + ) + async def begin_analyze_conversation_job( # type: ignore[override] + self, body: Union[AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any + ) -> AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]: + """Analyzes the input conversation utterance. 
+ + :param body: The input for the analyze conversations operation. Required. + :type body: ~azure.ai.language.conversations.models.AnalyzeConversationOperationInput or JSON or IO[bytes] + :return: An async poller whose ``result()`` yields ``AsyncItemPaged[ConversationActions]``; exposes metadata via ``.details``. + :rtype: ~azure.ai.language.conversations.AnalyzeConversationAsyncLROPoller[ + ~azure.core.async_paging.AsyncItemPaged[~azure.ai.language.conversations.models.ConversationActions]] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + polling: Union[bool, AsyncPollingMethod[AsyncItemPaged["ConversationActions"]]] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + cls = kwargs.pop("cls", None) # optional custom deserializer + kwargs.pop("error_map", None) + + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + # ---- paging helpers (state -> AsyncItemPaged[ConversationActions]) + + async def _fetch_state_by_next_link(next_link: str) -> AnalyzeConversationOperationState: + req = HttpRequest("GET", next_link) + resp = await self._client.send_request(req) # type: ignore[attr-defined] + if resp.status_code != 200: + raise HttpResponseError(response=resp) + await resp.read() + data = json.loads(resp.text()) + return AnalyzeConversationOperationState(data) + + def _build_pager_from_state(state: AnalyzeConversationOperationState) -> AsyncItemPaged["ConversationActions"]: + async def extract_data(s: AnalyzeConversationOperationState): + next_link = s.next_link + actions: ConversationActions = s.actions + return next_link, [actions] + + async def get_next(token: Optional[str]) -> Optional[AnalyzeConversationOperationState]: + if token is None: + return state + if not token: + return None + return await _fetch_state_by_next_link(token) + + return AsyncItemPaged(get_next, extract_data) + + # holder to let the deserializer set poller._last_state + poller_holder: Dict[str, AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]]] = {} + + # ---- deserializer: final HTTP -> AsyncItemPaged[ConversationActions] + def get_long_running_output(pipeline_response): + final = pipeline_response.http_response + if final.status_code == 200: + data = json.loads(final.text()) + op_state = AnalyzeConversationOperationState(data) + + poller_ref = poller_holder["poller"] + poller_ref._record_state_for_details(op_state) # pylint:disable=protected-access + + paged = _build_pager_from_state(op_state) + return cls(pipeline_response, paged, {}) if cls else paged + raise HttpResponseError(response=final) + + # ---- polling method + if polling is True: + polling_method: AsyncPollingMethod[AsyncItemPaged["ConversationActions"]] = cast( + AsyncPollingMethod[AsyncItemPaged["ConversationActions"]], + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod[AsyncItemPaged["ConversationActions"]], AsyncNoPolling()) + else: + polling_method = cast(AsyncPollingMethod[AsyncItemPaged["ConversationActions"]], polling) + + # ---- resume path + if cont_token: + return 
AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + ) + + # ---- submit job + raw_result = await self._analyze_conversation_job_initial( + body=body, + content_type=content_type, + cls=lambda x, y, z: x, # passthrough + headers=_headers, + params=_params, + **kwargs, + ) + # buffer initial body so .text() is available later + await raw_result.http_response.read() # type: ignore[attr-defined] + + # ---- build custom async poller + lro: AnalyzeConversationAsyncLROPoller[AsyncItemPaged["ConversationActions"]] = ( + AnalyzeConversationAsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + ) + poller_holder["poller"] = lro + return lro def patch_sdk(): @@ -19,3 +319,6 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ + + +__all__ = ["ConversationAnalysisClient", "AnalyzeConversationAsyncLROPoller"] \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py index 87676c65a8f0..6403928f0c2f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py @@ -7,9 +7,19 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +from ._models import ( + AnalyzeConversationOperationInput, + MultiLanguageConversationInput, + SummarizationOperationAction, + ConversationSummarizationActionContent, + AnalyzeConversationOperationAction, + ConversationPiiActionContent, + PiiOperationAction, + CharacterMaskPolicyType, + EntityMaskTypePolicyType, + NoMaskPolicyType, +) +from ._enums import RedactionCharacter def patch_sdk(): @@ -19,3 +29,18 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ + + +__all__ = [ + "AnalyzeConversationOperationInput", + "MultiLanguageConversationInput", + "SummarizationOperationAction", + "ConversationSummarizationActionContent", + "AnalyzeConversationOperationAction", + "ConversationPiiActionContent", + "PiiOperationAction", + "CharacterMaskPolicyType", + "RedactionCharacter", + "EntityMaskTypePolicyType", + "NoMaskPolicyType", +] \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_prediction.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_prediction.py index 2f8fe0b58cf5..3bed1c8091d5 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_prediction.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_prediction.py @@ -4,16 +4,12 @@ from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy from azure.ai.language.conversations import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOperationInput, ConversationActionContent, ConversationAnalysisInput, TextConversationItem, ConversationActionResult, ConversationPrediction, - 
ConversationIntent, - ConversationEntity, StringIndexType, - ResolutionBase, DateTimeResolution, ConversationLanguageUnderstandingInput, ) From 860978e0efc02a366c103337983d58c216d67a47 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 6 Nov 2025 12:16:27 -0800 Subject: [PATCH 07/10] updated test recordings --- .../azure-ai-language-conversations/assets.json | 2 +- .../azure/ai/language/conversations/_patch.py | 4 ++-- .../azure/ai/language/conversations/aio/_patch.py | 4 ++-- .../tests/test_conversation_pii_with_entity_mask_policy.py | 2 +- .../test_conversation_pii_with_entity_mask_policy_async.py | 4 +--- .../tests/test_conversation_pii_with_no_mask_policy.py | 3 +-- .../tests/test_conversation_pii_with_no_mask_policy_async.py | 2 +- 7 files changed, 9 insertions(+), 12 deletions(-) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/assets.json b/sdk/cognitivelanguage/azure-ai-language-conversations/assets.json index 7f81c93a1f73..b1ef97344412 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/assets.json +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/cognitivelanguage/azure-ai-language-conversations", - "Tag": "python/cognitivelanguage/azure-ai-language-conversations_ee7d01046a" + "Tag": "python/cognitivelanguage/azure-ai-language-conversations_37aac6bbbe" } diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py index 28d395e4c5dc..55f7d4bfd357 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py @@ -145,7 +145,7 @@ def __init__( :param credential: Key or token credential. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported + "2025-11-15-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str` """ @@ -208,7 +208,7 @@ def begin_analyze_conversation_job( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "content_type", "accept"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) def begin_analyze_conversation_job( # type: ignore[override] self, body: Union[AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py index 55711160700e..ac186400d2b8 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py @@ -140,7 +140,7 @@ def __init__( :param credential: Key or token credential. 
:type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2025-05-15-preview". Note that overriding this default value may result in unsupported + "2025-11-15-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str` """ @@ -203,7 +203,7 @@ async def begin_analyze_conversation_job( @api_version_validation( method_added_on="2023-04-01", params_added_on={"2023-04-01": ["api_version", "content_type", "accept"]}, - api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2024-11-15-preview", "2025-05-15-preview"], + api_versions_list=["2023-04-01", "2024-05-01", "2024-11-01", "2025-05-15-preview", "2025-11-15-preview"], ) async def begin_analyze_conversation_job( # type: ignore[override] self, body: Union[AnalyzeConversationOperationInput, JSON, IO[bytes]], **kwargs: Any diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy.py index e5650451073f..0356c86322c4 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy.py @@ -34,7 +34,7 @@ ConversationsPreparer = functools.partial( EnvironmentVariableLoader, "conversations", - conversations_endpoint="https://Sanitized.cognitiveservices.azure.com/", + conversations_endpoint="https://Sanitized.azure-api.net/", conversations_key="fake_key", ) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy_async.py index bd7b5e73119b..1725f1a8be96 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_entity_mask_policy_async.py @@ -20,9 +20,7 @@ ConversationalPiiResult, ConversationPiiItemResult, NamedEntity, - InputWarning, ConversationError, - CharacterMaskPolicyType, EntityMaskTypePolicyType, ) import re @@ -32,7 +30,7 @@ ConversationsPreparer = functools.partial( EnvironmentVariableLoader, "conversations", - conversations_endpoint="https://Sanitized.cognitiveservices.azure.com/", + conversations_endpoint="https://Sanitized.azure-api.net/", conversations_key="fake_key", ) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy.py index 0390495a723c..5820bb175ede 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy.py @@ -19,7 +19,6 @@ ConversationalPiiResult, ConversationPiiItemResult, NamedEntity, - InputWarning, ConversationError, AnalyzeConversationOperationAction, NoMaskPolicyType, @@ -32,7 +31,7 @@ ConversationsPreparer = functools.partial( EnvironmentVariableLoader, "conversations", - 
conversations_endpoint="https://Sanitized.cognitiveservices.azure.com/", + conversations_endpoint="https://Sanitized.azure-api.net/", conversations_key="fake_key", ) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy_async.py index ba71f2f8734a..d0a7a1d9ea48 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_pii_with_no_mask_policy_async.py @@ -31,7 +31,7 @@ ConversationsPreparer = functools.partial( EnvironmentVariableLoader, "conversations", - conversations_endpoint="https://Sanitized.cognitiveservices.azure.com/", + conversations_endpoint="https://Sanitized.azure-api.net/", conversations_key="fake_key", ) From c05fef86c4eb9a61dda56df9b19b2d183aa9cb15 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 6 Nov 2025 14:37:23 -0800 Subject: [PATCH 08/10] updated change log --- .../azure-ai-language-conversations/CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md b/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md index 6deeaeb2287c..ba596499ca27 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features Added +- Added new PII categories: DriversLicenseNumber, PassportNumber, PersonType, Organization, ABARoutingNumber, BankAccountNumber, DateOfBirth, InternationalBankingAccountNumber, SWIFTCode, VehicleIdentificationNumber, Age, Date, ZipCode, GovernmentIssuedId, CVV, HealthCardNumber, CASocialInsuranceNumber, USMedicareBeneficiaryId, GithubAccount, Location, and GPE. + ## 2.0.0b1 (2025-08-22) ### Features Added From b7739e00dc8ca4ebe9ff6718a4f6588b5c4ce08b Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 6 Nov 2025 15:04:50 -0800 Subject: [PATCH 09/10] updated readme --- .../azure-ai-language-conversations/README.md | 500 +++++++++--------- 1 file changed, 255 insertions(+), 245 deletions(-) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/README.md b/sdk/cognitivelanguage/azure-ai-language-conversations/README.md index a83bec0a6eed..f26509543b01 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/README.md +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/README.md @@ -31,7 +31,7 @@ Install the Azure Conversations client library for Python with [pip][pip_link]: pip install azure-ai-language-conversations ``` -> Note: This version of the client library defaults to the 2025-05-15-preview version of the service +> Note: This version of the client library defaults to the 2025-11-15-preview version of the service ### Authenticate the client In order to interact with the CLU service, you'll need to create an instance of the [ConversationAnalysisClient][conversationanalysisclient_class] class. You will need an **endpoint**, and an **API key** to instantiate a client object. For more information regarding authenticating with Cognitive Services, see [Authenticate requests to Azure Cognitive Services][cognitive_auth]. 
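For reference, a minimal construction of the client with either credential type could look like the sketch below. It is only an illustration: the environment variable names are the ones the samples in this package already use, and `DefaultAzureCredential` comes from the separate `azure-identity` package rather than from this library.

```python
import os

from azure.core.credentials import AzureKeyCredential
from azure.identity import DefaultAzureCredential

from azure.ai.language.conversations import ConversationAnalysisClient

endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]

# Option 1: API key authentication
key_client = ConversationAnalysisClient(endpoint, AzureKeyCredential(os.environ["AZURE_CONVERSATIONS_KEY"]))

# Option 2: Microsoft Entra ID token authentication (requires azure-identity)
aad_client = ConversationAnalysisClient(endpoint, credential=DefaultAzureCredential())
```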
@@ -101,285 +101,295 @@ The following examples show common scenarios using the `client` [created above]( ### Analyze Text with a Conversation App If you would like to extract custom intents and entities from a user utterance, you can call the `client.analyze_conversation()` method with your conversation's project name as follows: - + ```python -# import libraries import os -from azure.core.credentials import AzureKeyCredential + +from azure.identity import DefaultAzureCredential from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + ConversationLanguageUnderstandingInput, + ConversationAnalysisInput, + TextConversationItem, + ConversationActionContent, + StringIndexType, + ConversationActionResult, + ConversationPrediction, + DateTimeResolution, +) -# get secrets -clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] -clu_key = os.environ["AZURE_CONVERSATIONS_KEY"] -project_name = os.environ["AZURE_CONVERSATIONS_PROJECT_NAME"] -deployment_name = os.environ["AZURE_CONVERSATIONS_DEPLOYMENT_NAME"] - -# analyze quey -client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key)) -with client: - query = "Send an email to Carol about the tomorrow's demo" - result = client.analyze_conversation( - task={ - "kind": "Conversation", - "analysisInput": { - "conversationItem": { - "participantId": "1", - "id": "1", - "modality": "text", - "language": "en", - "text": query - }, - "isLoggingEnabled": False - }, - "parameters": { - "projectName": project_name, - "deploymentName": deployment_name, - "verbose": True - } - } + +def sample_conversation_prediction(): + # settings + endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + project_name = os.environ["AZURE_CONVERSATIONS_PROJECT_NAME"] + deployment_name = os.environ["AZURE_CONVERSATIONS_DEPLOYMENT_NAME"] + + credential = DefaultAzureCredential() + client = ConversationAnalysisClient(endpoint, credential=credential) + + # build request + data = ConversationLanguageUnderstandingInput( + conversation_input=ConversationAnalysisInput( + conversation_item=TextConversationItem( + id="1", + participant_id="participant1", + text="Send an email to Carol about tomorrow's demo", + ) + ), + action_content=ConversationActionContent( + project_name=project_name, + deployment_name=deployment_name, + string_index_type=StringIndexType.UTF16_CODE_UNIT, + ), ) -# view result -print("query: {}".format(result["result"]["query"])) -print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"])) - -print("top intent: {}".format(result["result"]["prediction"]["topIntent"])) -print("category: {}".format(result["result"]["prediction"]["intents"][0]["category"])) -print("confidence score: {}\n".format(result["result"]["prediction"]["intents"][0]["confidenceScore"])) - -print("entities:") -for entity in result["result"]["prediction"]["entities"]: - print("\ncategory: {}".format(entity["category"])) - print("text: {}".format(entity["text"])) - print("confidence score: {}".format(entity["confidenceScore"])) - if "resolutions" in entity: - print("resolutions") - for resolution in entity["resolutions"]: - print("kind: {}".format(resolution["resolutionKind"])) - print("value: {}".format(resolution["value"])) - if "extraInformation" in entity: - print("extra info") - for data in entity["extraInformation"]: - print("kind: {}".format(data["extraInformationKind"])) - if data["extraInformationKind"] == "ListKey": - print("key: {}".format(data["key"])) - if 
data["extraInformationKind"] == "EntitySubtype": - print("value: {}".format(data["value"])) + # call sync API + response = client.analyze_conversation(data) + + if isinstance(response, ConversationActionResult): + pred = response.result.prediction + if isinstance(pred, ConversationPrediction): + # top intent + print(f"Top intent: {pred.top_intent}\n") + + # intents + print("Intents:") + for intent in pred.intents or []: + print(f" Category: {intent.category}") + print(f" Confidence: {intent.confidence}") + print() + + # entities + print("Entities:") + for entity in pred.entities or []: + print(f" Category: {entity.category}") + print(f" Text: {entity.text}") + print(f" Offset: {entity.offset}") + print(f" Length: {entity.length}") + print(f" Confidence: {entity.confidence}") + + for res in entity.resolutions or []: + if isinstance(res, DateTimeResolution): + print(" DateTime Resolution:") + print(f" Sub Kind: {res.date_time_sub_kind}") + print(f" Timex: {res.timex}") + print(f" Value: {res.value}") + print() + else: + print("Unexpected result type from analyze_conversation.") ``` + + + ### Analyze Text with an Orchestration App If you would like to pass the user utterance to your orchestrator (worflow) app, you can call the `client.analyze_conversation()` method with your orchestration's project name. The orchestrator project simply orchestrates the submitted user utterance between your language apps (Luis, Conversation, and Question Answering) to get the best response according to the user intent. See the next example: + ```python -# import libraries import os -from azure.core.credentials import AzureKeyCredential + +from azure.identity import DefaultAzureCredential from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + ConversationActionContent, + ConversationAnalysisInput, + TextConversationItem, + StringIndexType, + ConversationLanguageUnderstandingInput, + OrchestrationPrediction, + QuestionAnsweringTargetIntentResult, + ConversationActionResult, +) + -# get secrets -clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] -clu_key = os.environ["AZURE_CONVERSATIONS_KEY"] -project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"] -deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"] - -# analyze query -client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key)) -with client: - query = "Reserve a table for 2 at the Italian restaurant" - result = client.analyze_conversation( - task={ - "kind": "Conversation", - "analysisInput": { - "conversationItem": { - "participantId": "1", - "id": "1", - "modality": "text", - "language": "en", - "text": query - }, - "isLoggingEnabled": False - }, - "parameters": { - "projectName": project_name, - "deploymentName": deployment_name, - "verbose": True - } - } +def sample_orchestration_prediction(): + # settings + endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + project_name = os.environ["AZURE_CONVERSATIONS_PROJECT_NAME"] + deployment_name = os.environ["AZURE_CONVERSATIONS_DEPLOYMENT_NAME"] + + credential = DefaultAzureCredential() + client = ConversationAnalysisClient(endpoint, credential=credential) + + # Build request using strongly-typed models + data = ConversationLanguageUnderstandingInput( + conversation_input=ConversationAnalysisInput( + conversation_item=TextConversationItem( + id="1", + participant_id="participant1", + text="How are you?", + ) + ), + action_content=ConversationActionContent( + 
project_name=project_name, + deployment_name=deployment_name, + string_index_type=StringIndexType.UTF16_CODE_UNIT, + ), ) -# view result -print("query: {}".format(result["result"]["query"])) -print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"])) - -# top intent -top_intent = result["result"]["prediction"]["topIntent"] -print("top intent: {}".format(top_intent)) -top_intent_object = result["result"]["prediction"]["intents"][top_intent] -print("confidence score: {}".format(top_intent_object["confidenceScore"])) -print("project kind: {}".format(top_intent_object["targetProjectKind"])) - -if top_intent_object["targetProjectKind"] == "Luis": - print("\nluis response:") - luis_response = top_intent_object["result"]["prediction"] - print("top intent: {}".format(luis_response["topIntent"])) - print("\nentities:") - for entity in luis_response["entities"]: - print("\n{}".format(entity)) + # Call sync API + response = client.analyze_conversation(data) + + # Narrow to expected result types + if isinstance(response, ConversationActionResult): + pred = response.result.prediction + if isinstance(pred, OrchestrationPrediction): + # Top intent name is the routed project name + top_intent = pred.top_intent + if not top_intent: + print("No top intent was returned by orchestration.") + return + + print(f"Top intent (responding project): {top_intent}") + + # Look up the routed target result + target_intent_result = pred.intents.get(top_intent) + if not isinstance(target_intent_result, QuestionAnsweringTargetIntentResult): + print("Top intent did not route to a Question Answering result.") + return + + qa = target_intent_result.result + if qa is not None and qa.answers is not None: + for ans in qa.answers: + print(ans.answer or "") + else: + print("Prediction was not an OrchestrationPrediction.") + else: + print("Unexpected result type from analyze_conversation.") ``` + + ### Conversational Summarization You can use this sample if you need to summarize a conversation in the form of an issue, and final resolution. For example, a dialog from tech support: + + ```python -# import libraries import os -from azure.core.credentials import AzureKeyCredential + +from azure.identity import DefaultAzureCredential from azure.ai.language.conversations import ConversationAnalysisClient -# get secrets -endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] -key = os.environ["AZURE_CONVERSATIONS_KEY"] -# analyze query -client = ConversationAnalysisClient(endpoint, AzureKeyCredential(key)) -with client: - poller = client.begin_conversation_analysis( - task={ - "displayName": "Analyze conversations from xxx", - "analysisInput": { - "conversations": [ - { - "conversationItems": [ - { - "text": "Hello, how can I help you?", - "modality": "text", - "id": "1", - "role": "Agent", - "participantId": "Agent" - }, - { - "text": "How to upgrade Office? I am getting error messages the whole day.", - "modality": "text", - "id": "2", - "role": "Customer", - "participantId": "Customer" - }, - { - "text": "Press the upgrade button please. 
Then sign in and follow the instructions.", - "modality": "text", - "id": "3", - "role": "Agent", - "participantId": "Agent" - } - ], - "modality": "text", - "id": "conversation1", - "language": "en" - }, - ] - }, - "tasks": [ - { - "taskName": "Issue task", - "kind": "ConversationalSummarizationTask", - "parameters": { - "summaryAspects": ["issue"] - } - }, - { - "taskName": "Resolution task", - "kind": "ConversationalSummarizationTask", - "parameters": { - "summaryAspects": ["resolution"] - } - }, - ] - } - ) +from azure.ai.language.conversations.models import ( + TextConversationItem, + TextConversation, + ParticipantRole, + MultiLanguageConversationInput, + SummarizationOperationAction, + ConversationSummarizationActionContent, + SummaryAspect, + AnalyzeConversationOperationInput, + SummarizationOperationResult, + ConversationError, +) - # view result - result = poller.result() - task_results = result["tasks"]["items"] - for task in task_results: - print(f"\n{task['taskName']} status: {task['status']}") - task_result = task["results"] - if task_result["errors"]: - print("... errors occurred ...") - for error in task_result["errors"]: - print(error) - else: - conversation_result = task_result["conversations"][0] - if conversation_result["warnings"]: - print("... view warnings ...") - for warning in conversation_result["warnings"]: - print(warning) - else: - summaries = conversation_result["summaries"] - print("... view task result ...") - for summary in summaries: - print(f"{summary['aspect']}: {summary['text']}") -``` -### Import a Conversation Project -This sample shows a common scenario for the authoring part of the SDK +def sample_conversation_summarization(): + # settings + endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + credential = DefaultAzureCredential() + + # Build conversation input + conversation_items = [ + TextConversationItem( + id="1", participant_id="Agent_1", text="Hello, how can I help you?", role=ParticipantRole.AGENT + ), + TextConversationItem( + id="2", + participant_id="Customer_1", + text="How to upgrade Office? I am getting error messages the whole day.", + role=ParticipantRole.CUSTOMER, + ), + TextConversationItem( + id="3", + participant_id="Agent_1", + text="Press the upgrade button please. 
Then sign in and follow the instructions.", + role=ParticipantRole.AGENT, + ), + ] + + conversation_input = MultiLanguageConversationInput( + conversations=[TextConversation(id="1", language="en", conversation_items=conversation_items)] + ) -```python -import os -from azure.core.credentials import AzureKeyCredential -from azure.ai.language.conversations.authoring import ConversationAuthoringClient - -clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] -clu_key = os.environ["AZURE_CONVERSATIONS_KEY"] - -project_name = "test_project" - -exported_project_assets = { - "projectKind": "Conversation", - "intents": [{"category": "Read"}, {"category": "Delete"}], - "entities": [{"category": "Sender"}], - "utterances": [ - { - "text": "Open Blake's email", - "dataset": "Train", - "intent": "Read", - "entities": [{"category": "Sender", "offset": 5, "length": 5}], - }, - { - "text": "Delete last email", - "language": "en-gb", - "dataset": "Test", - "intent": "Delete", - "entities": [], - }, - ], -} - -client = ConversationAuthoringClient( - clu_endpoint, AzureKeyCredential(clu_key) -) -poller = client.begin_import_project( - project_name=project_name, - project={ - "assets": exported_project_assets, - "metadata": { - "projectKind": "Conversation", - "settings": {"confidenceThreshold": 0.7}, - "projectName": "EmailApp", - "multilingual": True, - "description": "Trying out CLU", - "language": "en-us", - }, - "projectFileVersion": "2022-05-01", - "stringIndexType": "Utf16CodeUnit", - }, -) -response = poller.result() -print(response) + # Build the operation input and inline actions + operation_input = AnalyzeConversationOperationInput( + conversation_input=conversation_input, + actions=[ + SummarizationOperationAction( + name="Issue task", + action_content=ConversationSummarizationActionContent(summary_aspects=[SummaryAspect.ISSUE]), + ), + SummarizationOperationAction( + name="Resolution task", + action_content=ConversationSummarizationActionContent(summary_aspects=[SummaryAspect.RESOLUTION]), + ), + ], + ) + client = ConversationAnalysisClient(endpoint, credential=credential) + + poller = client.begin_analyze_conversation_job(body=operation_input) + + # Operation ID + op_id = poller.details.get("operation_id") + if op_id: + print(f"Operation ID: {op_id}") + + # Wait for result + paged_actions = poller.result() + + # Final-state metadata + d = poller.details + print(f"Job ID: {d.get('job_id')}") + print(f"Status: {d.get('status')}") + print(f"Created: {d.get('created_date_time')}") + print(f"Last Updated: {d.get('last_updated_date_time')}") + if d.get("expiration_date_time"): + print(f"Expires: {d.get('expiration_date_time')}") + if d.get("display_name"): + print(f"Display Name: {d.get('display_name')}") + + # Iterate results + for actions_page in paged_actions: + print( + f"Completed: {actions_page.completed}, " + f"In Progress: {actions_page.in_progress}, " + f"Failed: {actions_page.failed}, " + f"Total: {actions_page.total}" + ) + + for action_result in actions_page.task_results or []: + if isinstance(action_result, SummarizationOperationResult): + for conversation in action_result.results.conversations or []: + print(f" Conversation ID: {conversation.id}") + print(" Summaries:") + for summary in conversation.summaries or []: + print(f" Aspect: {summary.aspect}") + print(f" Text: {summary.text}") + if conversation.warnings: + print(" Warnings:") + for warning in conversation.warnings: + print(f" Code: {warning.code}, Message: {warning.message}") + else: + print(" [No supported results to 
display for this action type]") + + # Errors + if d.get("errors"): + print("\nErrors:") + for error in d["errors"]: + if isinstance(error, ConversationError): + print(f" Code: {error.code} - {error.message}") ``` + + ## Optional Configuration Optional keyword arguments can be passed in at the client and per-operation level. The azure-core [reference documentation][azure_core_ref_docs] describes available configurations for retries, logging, transport protocols, and more. From ea4571a641ee6e66a3ee155c0523a2a1660021b7 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 6 Nov 2025 15:14:48 -0800 Subject: [PATCH 10/10] fix pylint --- .../ai/language/conversations/_operations/_operations.py | 2 +- .../azure/ai/language/conversations/_patch.py | 2 +- .../azure/ai/language/conversations/_utils/model_base.py | 4 ++-- .../ai/language/conversations/aio/_operations/_operations.py | 2 +- .../azure/ai/language/conversations/aio/_patch.py | 2 +- .../azure/ai/language/conversations/models/_patch.py | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py index b3de4bc23998..47af4ea2806b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_operations/_operations.py @@ -5,7 +5,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint:disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py index 55f7d4bfd357..a5551cfc7fc0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_patch.py @@ -316,4 +316,4 @@ def patch_sdk(): """ -__all__ = ["ConversationAnalysisClient", "AnalyzeConversationLROPoller"] \ No newline at end of file +__all__ = ["ConversationAnalysisClient", "AnalyzeConversationLROPoller"] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py index 12926fa98dcf..430bda7b37fc 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_utils/model_base.py @@ -22,7 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint:disable=import-error from typing_extensions import Self import 
isodate from azure.core.exceptions import DeserializationError @@ -640,7 +640,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) + return super().__new__(cls) # pylint:disable=no-value-for-parameter def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py index 9f1d3b7cbaf7..850d1a0a849a 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_operations/_operations.py @@ -6,7 +6,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint:disable=import-error from io import IOBase import json from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py index ac186400d2b8..849fbbb1b2ca 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_patch.py @@ -321,4 +321,4 @@ def patch_sdk(): """ -__all__ = ["ConversationAnalysisClient", "AnalyzeConversationAsyncLROPoller"] \ No newline at end of file +__all__ = ["ConversationAnalysisClient", "AnalyzeConversationAsyncLROPoller"] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py index 6403928f0c2f..c4c8835375ca 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_patch.py @@ -43,4 +43,4 @@ def patch_sdk(): "RedactionCharacter", "EntityMaskTypePolicyType", "NoMaskPolicyType", -] \ No newline at end of file +]
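
Taken together, the patched `begin_analyze_conversation_job`, the custom `AnalyzeConversationLROPoller`, and the models re-exported from `models/_patch.py` are exercised end to end in the updated README. The sketch below condenses that flow for the synchronous client: submit a single summarization action, read the operation metadata surfaced by `.details`, and walk the paged results. It only reuses names that appear in this patch; `logging_enable` is a generic azure-core per-operation option rather than anything specific to this package, and the explicit `api_version` simply restates the default documented in the patched `__init__`.

```python
# Condensed, illustrative walkthrough of the long-running API customized in this patch.
# Assumes the AZURE_CONVERSATIONS_ENDPOINT environment variable used by the package samples.
import os

from azure.identity import DefaultAzureCredential
from azure.ai.language.conversations import ConversationAnalysisClient
from azure.ai.language.conversations.models import (
    AnalyzeConversationOperationInput,
    ConversationSummarizationActionContent,
    MultiLanguageConversationInput,
    ParticipantRole,
    SummarizationOperationAction,
    SummarizationOperationResult,
    SummaryAspect,
    TextConversation,
    TextConversationItem,
)

client = ConversationAnalysisClient(
    os.environ["AZURE_CONVERSATIONS_ENDPOINT"],
    credential=DefaultAzureCredential(),
    api_version="2025-11-15-preview",  # optional override; restates the documented default
)

operation_input = AnalyzeConversationOperationInput(
    conversation_input=MultiLanguageConversationInput(
        conversations=[
            TextConversation(
                id="1",
                language="en",
                conversation_items=[
                    TextConversationItem(
                        id="1",
                        participant_id="Agent_1",
                        text="Hello, how can I help you?",
                        role=ParticipantRole.AGENT,
                    )
                ],
            )
        ]
    ),
    actions=[
        SummarizationOperationAction(
            name="Issue task",
            action_content=ConversationSummarizationActionContent(summary_aspects=[SummaryAspect.ISSUE]),
        )
    ],
)

# Submit the long-running job; extra keyword arguments flow through to azure-core.
poller = client.begin_analyze_conversation_job(body=operation_input, logging_enable=True)

# Operation metadata is available from the custom poller as soon as the job is accepted.
print("operation id:", poller.details.get("operation_id"))

# result() returns an ItemPaged of ConversationActions pages.
for actions_page in poller.result():
    print(f"completed={actions_page.completed} failed={actions_page.failed} total={actions_page.total}")
    for action_result in actions_page.task_results or []:
        if isinstance(action_result, SummarizationOperationResult):
            for conversation in action_result.results.conversations or []:
                for summary in conversation.summaries or []:
                    print(summary.aspect, summary.text)

# Final-state metadata (status, job_id, errors, ...) is also exposed on .details.
print("status:", poller.details.get("status"))
```

Keeping this metadata on `.details`, rather than changing the shape of `result()`, means callers that only iterate the pager are unaffected, while callers that need the job id, status, or errors can read them from the same poller object.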