diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/_meta.json b/sdk/documentintelligence/azure-ai-documentintelligence/_meta.json
new file mode 100644
index 000000000000..b3537199f6dc
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/_meta.json
@@ -0,0 +1,6 @@
+{
+ "commit": "e6ae33c2dc2c450ddd1147342b048a4ccd49323e",
+ "repository_url": "https://github.com/test-repo-billy/azure-rest-api-specs",
+ "typespec_src": "specification/ai/DocumentIntelligence",
+ "@azure-tools/typespec-python": "0.31.1"
+}
\ No newline at end of file
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/__init__.py
index 901d45070aa5..5691d1f77801 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/__init__.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/__init__.py
@@ -6,21 +6,23 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from ._patch import DocumentIntelligenceClient
-from ._patch import DocumentIntelligenceAdministrationClient
+from ._client import DocumentIntelligenceClient
+from ._client import DocumentIntelligenceAdministrationClient
from ._version import VERSION
__version__ = VERSION
-
-from ._patch import AnalyzeDocumentLROPoller
+try:
+ from ._patch import __all__ as _patch_all
+ from ._patch import * # pylint: disable=unused-wildcard-import
+except ImportError:
+ _patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
- "AnalyzeDocumentLROPoller",
"DocumentIntelligenceClient",
"DocumentIntelligenceAdministrationClient",
]
-
+__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
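This switches the package to the standard codegen customization hook: the generated `_client.py` provides the clients, and `_patch.py` only contributes whatever it lists in `__all__`. A minimal sketch of the hook's shape (contents here are illustrative, not this package's actual customization):

```python
# _patch.py -- hypothetical sketch of the customization hook pattern.
# The generated __init__.py does `from ._patch import *`, extends __all__
# with _patch.__all__, and then calls patch_sdk().
from typing import List

__all__: List[str] = []  # names for __init__.py to re-export


def patch_sdk():
    """Intentionally empty; a hook for runtime patching after import."""
```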
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py
index c4b1008c1e85..12ad7f29c71e 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py
@@ -4,7 +4,7 @@
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
-# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except
+# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except, too-many-lines
import copy
import calendar
@@ -19,6 +19,7 @@
import email.utils
from datetime import datetime, date, time, timedelta, timezone
from json import JSONEncoder
+import xml.etree.ElementTree as ET
from typing_extensions import Self
import isodate
from azure.core.exceptions import DeserializationError
@@ -123,7 +124,7 @@ def _serialize_datetime(o, format: typing.Optional[str] = None):
def _is_readonly(p):
try:
- return p._visibility == ["read"] # pylint: disable=protected-access
+ return p._visibility == ["read"]
except AttributeError:
return False
@@ -286,6 +287,12 @@ def _deserialize_decimal(attr):
return decimal.Decimal(str(attr))
+def _deserialize_int_as_str(attr):
+ if isinstance(attr, int):
+ return attr
+ return int(attr)
+
+
_DESERIALIZE_MAPPING = {
datetime: _deserialize_datetime,
date: _deserialize_date,
@@ -307,9 +314,11 @@ def _deserialize_decimal(attr):
def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None):
+ if annotation is int and rf and rf._format == "str":
+ return _deserialize_int_as_str
if rf and rf._format:
return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format)
- return _DESERIALIZE_MAPPING.get(annotation)
+ return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore
def _get_type_alias_type(module_name: str, alias_name: str):
@@ -441,6 +450,10 @@ def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-m
return float(o)
if isinstance(o, enum.Enum):
return o.value
+ if isinstance(o, int):
+ if format == "str":
+ return str(o)
+ return o
try:
# First try datetime.datetime
return _serialize_datetime(o, format)
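Together with `_deserialize_int_as_str` above, this lets integer fields declared with `format="str"` travel as strings on the wire while staying `int` in Python. A minimal sketch using this module's own helpers (assumes it runs inside this module):

```python
assert _serialize(42, format="str") == "42"   # int -> wire string
assert _serialize(42) == 42                   # no format: unchanged
assert _deserialize_int_as_str("42") == 42    # wire string -> int
assert _deserialize_int_as_str(42) == 42      # already an int passes through
```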
@@ -471,6 +484,8 @@ def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typin
return value
if rf._is_model:
return _deserialize(rf._type, value)
+ if isinstance(value, ET.Element):
+ value = _deserialize(rf._type, value)
return _serialize(value, rf._format)
@@ -489,10 +504,58 @@ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
for rest_field in self._attr_to_rest_field.values()
if rest_field._default is not _UNSET
}
- if args:
- dict_to_pass.update(
- {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
- )
+ if args: # pylint: disable=too-many-nested-blocks
+ if isinstance(args[0], ET.Element):
+ existed_attr_keys = []
+ model_meta = getattr(self, "_xml", {})
+
+ for rf in self._attr_to_rest_field.values():
+ prop_meta = getattr(rf, "_xml", {})
+ xml_name = prop_meta.get("name", rf._rest_name)
+ xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
+ if xml_ns:
+ xml_name = "{" + xml_ns + "}" + xml_name
+
+ # attribute
+ if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name))
+ continue
+
+ # an unwrapped element is an array
+ if prop_meta.get("unwrapped", False):
+ # an unwrapped array may take its XML name from either the items meta or the prop meta
+ if prop_meta.get("itemsName"):
+ xml_name = prop_meta.get("itemsName")
+ xml_ns = prop_meta.get("itemNs")
+ if xml_ns:
+ xml_name = "{" + xml_ns + "}" + xml_name
+ items = args[0].findall(xml_name) # pyright: ignore
+ if len(items) > 0:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, items)
+ continue
+
+ # a text element is a primitive type
+ if prop_meta.get("text", False):
+ if args[0].text is not None:
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text)
+ continue
+
+ # a wrapped element may be a normal property or an array; it should contain only one element
+ item = args[0].find(xml_name)
+ if item is not None:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, item)
+
+ # remaining elements are treated as additional properties
+ for e in args[0]:
+ if e.tag not in existed_attr_keys:
+ dict_to_pass[e.tag] = _convert_element(e)
+ else:
+ dict_to_pass.update(
+ {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
+ )
else:
non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field]
if non_attr_kwargs:
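The new `ET.Element` branch lets a model be constructed straight from a parsed XML node: attributes, text nodes, unwrapped arrays, and wrapped child elements are matched against each field's `_xml` metadata, and leftover children become additional properties via `_convert_element`. A rough sketch with a hypothetical model (names are illustrative, not from this package):

```python
import xml.etree.ElementTree as ET

# Hypothetical model: 'id' maps to an XML attribute, 'name' to a child element.
class Pet(Model):
    id: int = rest_field(xml={"name": "id", "attribute": True})
    name: str = rest_field(xml={"name": "name"})

elem = ET.fromstring('<Pet id="1"><name>Rex</name><color>tan</color></Pet>')
pet = Pet(elem)
# pet["id"] == 1 and pet["name"] == "Rex"; <color> matches no rest field,
# so it is kept as the additional property pet["color"] == "tan".
```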
@@ -541,12 +604,10 @@ def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None:
base.__mapping__[discriminator or cls.__name__] = cls # type: ignore # pylint: disable=no-member
@classmethod
- def _get_discriminator(cls, exist_discriminators) -> typing.Optional[str]:
+ def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]:
for v in cls.__dict__.values():
- if (
- isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators
- ): # pylint: disable=protected-access
- return v._rest_name # pylint: disable=protected-access
+ if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators:
+ return v
return None
@classmethod
@@ -554,11 +615,25 @@ def _deserialize(cls, data, exist_discriminators):
if not hasattr(cls, "__mapping__"): # pylint: disable=no-member
return cls(data)
discriminator = cls._get_discriminator(exist_discriminators)
- exist_discriminators.append(discriminator)
- mapped_cls = cls.__mapping__.get(data.get(discriminator), cls) # pyright: ignore # pylint: disable=no-member
- if mapped_cls == cls:
+ if discriminator is None:
return cls(data)
- return mapped_cls._deserialize(data, exist_discriminators) # pylint: disable=protected-access
+ exist_discriminators.append(discriminator._rest_name)
+ if isinstance(data, ET.Element):
+ model_meta = getattr(cls, "_xml", {})
+ prop_meta = getattr(discriminator, "_xml", {})
+ xml_name = prop_meta.get("name", discriminator._rest_name)
+ xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
+ if xml_ns:
+ xml_name = "{" + xml_ns + "}" + xml_name
+
+ if data.get(xml_name) is not None:
+ discriminator_value = data.get(xml_name)
+ else:
+ discriminator_value = data.find(xml_name).text # pyright: ignore
+ else:
+ discriminator_value = data.get(discriminator._rest_name)
+ mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member
+ return mapped_cls._deserialize(data, exist_discriminators)
def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]:
"""Return a dict that can be JSONify using json.dump.
@@ -624,6 +699,8 @@ def _deserialize_dict(
):
if obj is None:
return obj
+ if isinstance(obj, ET.Element):
+ obj = {child.tag: child for child in obj}
return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()}
@@ -644,6 +721,8 @@ def _deserialize_sequence(
):
if obj is None:
return obj
+ if isinstance(obj, ET.Element):
+ obj = list(obj)
return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)
@@ -659,7 +738,7 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915,
module: typing.Optional[str],
rf: typing.Optional["_RestField"] = None,
) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]:
- if not annotation or annotation in [int, float]:
+ if not annotation:
return None
# is it a type alias?
@@ -734,7 +813,6 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915,
try:
if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore
if len(annotation.__args__) > 1: # pyright: ignore
-
entry_deserializers = [
_get_deserialize_callable_from_annotation(dt, module, rf)
for dt in annotation.__args__ # pyright: ignore
@@ -769,12 +847,23 @@ def _deserialize_default(
def _deserialize_with_callable(
deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]],
value: typing.Any,
-):
+): # pylint: disable=too-many-return-statements
try:
if value is None or isinstance(value, _Null):
return None
+ if isinstance(value, ET.Element):
+ if deserializer is str:
+ return value.text or ""
+ if deserializer is int:
+ return int(value.text) if value.text else None
+ if deserializer is float:
+ return float(value.text) if value.text else None
+ if deserializer is bool:
+ return value.text == "true" if value.text else None
if deserializer is None:
return value
+ if deserializer in [int, float, bool]:
+ return deserializer(value)
if isinstance(deserializer, CaseInsensitiveEnumMeta):
try:
return deserializer(value)
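With these branches a bare `ET.Element` deserializes by its text content: `str` falls back to `""`, `int`/`float`/`bool` return `None` for empty text, and booleans follow XML's lowercase `"true"`/`"false"`. A quick sketch against this module's helper:

```python
import xml.etree.ElementTree as ET

assert _deserialize_with_callable(bool, ET.fromstring("<v>true</v>")) is True
assert _deserialize_with_callable(int, ET.fromstring("<v>7</v>")) == 7
assert _deserialize_with_callable(str, ET.fromstring("<v/>")) == ""  # empty text
```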
@@ -815,6 +904,7 @@ def __init__(
default: typing.Any = _UNSET,
format: typing.Optional[str] = None,
is_multipart_file_input: bool = False,
+ xml: typing.Optional[typing.Dict[str, typing.Any]] = None,
):
self._type = type
self._rest_name_input = name
@@ -825,6 +915,7 @@ def __init__(
self._default = default
self._format = format
self._is_multipart_file_input = is_multipart_file_input
+ self._xml = xml if xml is not None else {}
@property
def _class_type(self) -> typing.Any:
@@ -875,6 +966,7 @@ def rest_field(
default: typing.Any = _UNSET,
format: typing.Optional[str] = None,
is_multipart_file_input: bool = False,
+ xml: typing.Optional[typing.Dict[str, typing.Any]] = None,
) -> typing.Any:
return _RestField(
name=name,
@@ -883,6 +975,7 @@ def rest_field(
default=default,
format=format,
is_multipart_file_input=is_multipart_file_input,
+ xml=xml,
)
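`rest_field` (and `rest_discriminator` below) now accept an `xml` mapping that drives all of the XML handling above; the keys this file reads include `name`, `ns`, `prefix`, `attribute`, `text`, `unwrapped`, and the `items*` variants for array entries. A sketch of field declarations exercising a few of them (hypothetical model):

```python
import typing

# Hypothetical declarations showing the xml metadata keys consumed above.
class Container(Model):
    # serialized as an XML attribute: <Container capacity="...">
    capacity: int = rest_field(xml={"name": "capacity", "attribute": True})
    # unwrapped array: repeated <tag> children instead of a wrapper element
    tags: typing.List[str] = rest_field(xml={"unwrapped": True, "itemsName": "tag"})
```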
@@ -891,5 +984,175 @@ def rest_discriminator(
name: typing.Optional[str] = None,
type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin
visibility: typing.Optional[typing.List[str]] = None,
+ xml: typing.Optional[typing.Dict[str, typing.Any]] = None,
+) -> typing.Any:
+ return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml)
+
+
+def serialize_xml(model: Model, exclude_readonly: bool = False) -> str:
+ """Serialize a model to XML.
+
+ :param Model model: The model to serialize.
+ :param bool exclude_readonly: Whether to exclude readonly properties.
+ :returns: The XML representation of the model.
+ :rtype: str
+ """
+ return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore
+
+
+def _get_element(
+ o: typing.Any,
+ exclude_readonly: bool = False,
+ parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None,
+ wrapped_element: typing.Optional[ET.Element] = None,
+) -> typing.Union[ET.Element, typing.List[ET.Element]]:
+ if _is_model(o):
+ model_meta = getattr(o, "_xml", {})
+
+ # if the prop is a model, use the prop element directly; otherwise generate a wrapper element for the model
+ if wrapped_element is None:
+ wrapped_element = _create_xml_element(
+ model_meta.get("name", o.__class__.__name__),
+ model_meta.get("prefix"),
+ model_meta.get("ns"),
+ )
+
+ readonly_props = []
+ if exclude_readonly:
+ readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)]
+
+ for k, v in o.items():
+ # do not serialize readonly properties
+ if exclude_readonly and k in readonly_props:
+ continue
+
+ prop_rest_field = _get_rest_field(o._attr_to_rest_field, k)
+ if prop_rest_field:
+ prop_meta = getattr(prop_rest_field, "_xml").copy()
+ # use the wire name as xml name if no specific name is set
+ if prop_meta.get("name") is None:
+ prop_meta["name"] = k
+ else:
+ # additional properties have no rest field, so use the wire name as the xml name
+ prop_meta = {"name": k}
+
+ # if no ns for prop, use model's
+ if prop_meta.get("ns") is None and model_meta.get("ns"):
+ prop_meta["ns"] = model_meta.get("ns")
+ prop_meta["prefix"] = model_meta.get("prefix")
+
+ if prop_meta.get("unwrapped", False):
+ # unwrapped can only be set on an array
+ wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta))
+ elif prop_meta.get("text", False):
+ # text can only be set on a primitive type
+ wrapped_element.text = _get_primitive_type_value(v)
+ elif prop_meta.get("attribute", False):
+ xml_name = prop_meta.get("name", k)
+ if prop_meta.get("ns"):
+ ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore
+ xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore
+ # an attribute value should be a primitive type
+ wrapped_element.set(xml_name, _get_primitive_type_value(v))
+ else:
+ # other wrapped prop element
+ wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta))
+ return wrapped_element
+ if isinstance(o, list):
+ return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore
+ if isinstance(o, dict):
+ result = []
+ for k, v in o.items():
+ result.append(
+ _get_wrapped_element(
+ v,
+ exclude_readonly,
+ {
+ "name": k,
+ "ns": parent_meta.get("ns") if parent_meta else None,
+ "prefix": parent_meta.get("prefix") if parent_meta else None,
+ },
+ )
+ )
+ return result
+
+ # primitive case: create the element from parent_meta
+ if parent_meta:
+ return _get_wrapped_element(
+ o,
+ exclude_readonly,
+ {
+ "name": parent_meta.get("itemsName", parent_meta.get("name")),
+ "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")),
+ "ns": parent_meta.get("itemsNs", parent_meta.get("ns")),
+ },
+ )
+
+ raise ValueError("Could not serialize value into xml: " + str(o))
+
+
+def _get_wrapped_element(
+ v: typing.Any,
+ exclude_readonly: bool,
+ meta: typing.Optional[typing.Dict[str, typing.Any]],
+) -> ET.Element:
+ wrapped_element = _create_xml_element(
+ meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None
+ )
+ if isinstance(v, (dict, list)):
+ wrapped_element.extend(_get_element(v, exclude_readonly, meta))
+ elif _is_model(v):
+ _get_element(v, exclude_readonly, meta, wrapped_element)
+ else:
+ wrapped_element.text = _get_primitive_type_value(v)
+ return wrapped_element
+
+
+def _get_primitive_type_value(v) -> str:
+ if v is True:
+ return "true"
+ if v is False:
+ return "false"
+ if isinstance(v, _Null):
+ return ""
+ return str(v)
+
+
+def _create_xml_element(tag, prefix=None, ns=None):
+ if prefix and ns:
+ ET.register_namespace(prefix, ns)
+ if ns:
+ return ET.Element("{" + ns + "}" + tag)
+ return ET.Element(tag)
+
+
+def _deserialize_xml(
+ deserializer: typing.Any,
+ value: str,
) -> typing.Any:
- return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility)
+ element = ET.fromstring(value) # nosec
+ return _deserialize(deserializer, element)
+
+
+def _convert_element(e: ET.Element):
+ # dict case
+ if len(e.attrib) > 0 or len({child.tag for child in e}) > 1:
+ dict_result: typing.Dict[str, typing.Any] = {}
+ for child in e:
+ if dict_result.get(child.tag) is not None:
+ if isinstance(dict_result[child.tag], list):
+ dict_result[child.tag].append(_convert_element(child))
+ else:
+ dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)]
+ else:
+ dict_result[child.tag] = _convert_element(child)
+ dict_result.update(e.attrib)
+ return dict_result
+ # array case
+ if len(e) > 0:
+ array_result: typing.List[typing.Any] = []
+ for child in e:
+ array_result.append(_convert_element(child))
+ return array_result
+ # primitive case
+ return e.text
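Taken together, `serialize_xml` and `_deserialize_xml` give models an XML round trip, while `_convert_element` folds unknown elements into plain dicts, lists, or strings. Continuing the hypothetical `Pet` sketch from above:

```python
xml_str = serialize_xml(pet)            # e.g. '<Pet id="1"><name>Rex</name>...'
same_pet = _deserialize_xml(Pet, xml_str)

# _convert_element on its own: children with one repeated tag become a list,
# attributes merge into a dict, and leaf text stays a string.
assert _convert_element(ET.fromstring("<tags><t>a</t><t>b</t></tags>")) == ["a", "b"]
```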
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py
index 36334aa2ea34..057c6c92037f 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py
@@ -6,15 +6,16 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from ._patch import DocumentIntelligenceClientOperationsMixin
-from ._patch import DocumentIntelligenceAdministrationClientOperationsMixin
-
+from ._operations import DocumentIntelligenceClientOperationsMixin
+from ._operations import DocumentIntelligenceAdministrationClientOperationsMixin
+from ._patch import __all__ as _patch_all
+from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"DocumentIntelligenceClientOperationsMixin",
"DocumentIntelligenceAdministrationClientOperationsMixin",
]
-
+__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py
index 78a22b25704a..b7ae88e02fba 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py
@@ -654,7 +654,7 @@ def _analyze_document_initial(
output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
**kwargs: Any,
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -747,7 +747,7 @@ def begin_analyze_document(
:type model_id: str
:param analyze_request: Analyze request parameters. Default value is None.
:type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -799,7 +799,7 @@ def begin_analyze_document(
:type model_id: str
:param analyze_request: Analyze request parameters. Default value is None.
:type analyze_request: JSON
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -851,7 +851,7 @@ def begin_analyze_document(
:type model_id: str
:param analyze_request: Analyze request parameters. Default value is None.
:type analyze_request: IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -904,7 +904,7 @@ def begin_analyze_document(
AnalyzeDocumentRequest, JSON, IO[bytes] Default value is None.
:type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest or JSON or
IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -1008,7 +1008,7 @@ def _analyze_batch_documents_initial(
output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
**kwargs: Any,
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1101,7 +1101,7 @@ def begin_analyze_batch_documents(
:type model_id: str
:param analyze_batch_request: Analyze batch request parameters. Default value is None.
:type analyze_batch_request: ~azure.ai.documentintelligence.models.AnalyzeBatchDocumentsRequest
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -1153,7 +1153,7 @@ def begin_analyze_batch_documents(
:type model_id: str
:param analyze_batch_request: Analyze batch request parameters. Default value is None.
:type analyze_batch_request: JSON
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -1205,7 +1205,7 @@ def begin_analyze_batch_documents(
:type model_id: str
:param analyze_batch_request: Analyze batch request parameters. Default value is None.
:type analyze_batch_request: IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -1258,7 +1258,7 @@ def begin_analyze_batch_documents(
AnalyzeBatchDocumentsRequest, JSON, IO[bytes] Default value is None.
:type analyze_batch_request: ~azure.ai.documentintelligence.models.AnalyzeBatchDocumentsRequest
or JSON or IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -1360,7 +1360,7 @@ def get_analyze_result_pdf(self, model_id: str, result_id: str, **kwargs: Any) -
:rtype: Iterator[bytes]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1428,7 +1428,7 @@ def get_analyze_result_figure(
:rtype: Iterator[bytes]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1491,7 +1491,7 @@ def _classify_document_initial(
pages: Optional[str] = None,
**kwargs: Any,
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1579,7 +1579,7 @@ def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -1615,7 +1615,7 @@ def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -1651,7 +1651,7 @@ def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
@@ -1688,7 +1688,7 @@ def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:return: An instance of LROPoller that returns AnalyzeResult. The AnalyzeResult is compatible
@@ -1764,7 +1764,7 @@ class DocumentIntelligenceAdministrationClientOperationsMixin( # pylint: disabl
def _build_document_model_initial(
self, build_request: Union[_models.BuildDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1953,7 +1953,7 @@ def get_long_running_output(pipeline_response):
def _compose_model_initial(
self, compose_request: Union[_models.ComposeDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2213,7 +2213,7 @@ def authorize_model_copy(
:rtype: ~azure.ai.documentintelligence.models.CopyAuthorization
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2276,7 +2276,7 @@ def authorize_model_copy(
def _copy_model_to_initial(
self, model_id: str, copy_to_request: Union[_models.CopyAuthorization, JSON, IO[bytes]], **kwargs: Any
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2487,7 +2487,7 @@ def get_model(self, model_id: str, **kwargs: Any) -> _models.DocumentModelDetail
:rtype: ~azure.ai.documentintelligence.models.DocumentModelDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2557,7 +2557,7 @@ def list_models(self, **kwargs: Any) -> Iterable["_models.DocumentModelDetails"]
cls: ClsType[List[_models.DocumentModelDetails]] = kwargs.pop("cls", None)
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2637,7 +2637,7 @@ def delete_model(self, model_id: str, **kwargs: Any) -> None: # pylint: disable
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2689,7 +2689,7 @@ def get_resource_info(self, **kwargs: Any) -> _models.ResourceDetails:
:rtype: ~azure.ai.documentintelligence.models.ResourceDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2749,7 +2749,7 @@ def get_operation(self, operation_id: str, **kwargs: Any) -> _models.OperationDe
:rtype: ~azure.ai.documentintelligence.models.OperationDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2818,7 +2818,7 @@ def list_operations(self, **kwargs: Any) -> Iterable["_models.OperationDetails"]
cls: ClsType[List[_models.OperationDetails]] = kwargs.pop("cls", None)
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2891,7 +2891,7 @@ def get_next(next_link=None):
def _build_classifier_initial(
self, build_request: Union[_models.BuildDocumentClassifierRequest, JSON, IO[bytes]], **kwargs: Any
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -3156,7 +3156,7 @@ def authorize_classifier_copy(
:rtype: ~azure.ai.documentintelligence.models.ClassifierCopyAuthorization
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -3222,7 +3222,7 @@ def _copy_classifier_to_initial(
copy_to_request: Union[_models.ClassifierCopyAuthorization, JSON, IO[bytes]],
**kwargs: Any,
) -> Iterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -3437,7 +3437,7 @@ def get_classifier(self, classifier_id: str, **kwargs: Any) -> _models.DocumentC
:rtype: ~azure.ai.documentintelligence.models.DocumentClassifierDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -3507,7 +3507,7 @@ def list_classifiers(self, **kwargs: Any) -> Iterable["_models.DocumentClassifie
cls: ClsType[List[_models.DocumentClassifierDetails]] = kwargs.pop("cls", None)
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -3589,7 +3589,7 @@ def delete_classifier( # pylint: disable=inconsistent-return-statements
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
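The recurring docstring fix above ("Range" → "List" of 1-based page numbers) reflects that `pages` accepts a comma-separated mix of single pages and ranges. A usage sketch (endpoint, key, and URL are placeholders):

```python
from azure.ai.documentintelligence import DocumentIntelligenceClient
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
from azure.core.credentials import AzureKeyCredential

client = DocumentIntelligenceClient("<endpoint>", AzureKeyCredential("<key>"))
poller = client.begin_analyze_document(
    "prebuilt-layout",
    AnalyzeDocumentRequest(url_source="<document-url>"),
    pages="1-3,5,7-9",  # single pages and ranges, comma-separated
)
result = poller.result()
```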
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py
index b0bc39b482c2..f7dd32510333 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py
@@ -6,624 +6,9 @@
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
-import sys
-import re
-from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, Mapping, cast, overload
+from typing import List
-from azure.core.pipeline import PipelineResponse
-from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.polling.base_polling import LROBasePolling
-from azure.core.rest import HttpRequest, HttpResponse
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.utils import case_insensitive_dict
-
-from ._operations import (
- DocumentIntelligenceClientOperationsMixin as GeneratedDIClientOps,
- DocumentIntelligenceAdministrationClientOperationsMixin as GeneratedDIAdminClientOps,
-)
-from .. import models as _models
-from .._model_base import _deserialize
-
-if sys.version_info >= (3, 9):
- from collections.abc import MutableMapping
-else:
- from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
-JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
-T = TypeVar("T")
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
-PollingReturnType_co = TypeVar("PollingReturnType_co", covariant=True)
-_FINISHED = frozenset(["succeeded", "canceled", "failed", "completed"])
-
-
-def _parse_operation_id(operation_location_header):
- regex = "[^:]+://[^/]+/documentintelligence/.+/([^?/]+)"
- return re.match(regex, operation_location_header).group(1)
-
-def _finished(status) -> bool:
- if hasattr(status, "value"):
- status = status.value
- return str(status).lower() in _FINISHED
-
-
-class AnalyzeDocumentLROPoller(LROPoller[PollingReturnType_co]):
- @property
- def details(self) -> Mapping[str, Any]:
- """Returns metadata associated with the long-running operation.
-
- :return: Returns metadata associated with the long-running operation.
- :rtype: Mapping[str, Any]
- """
- return {
- "operation_id": _parse_operation_id(
- self.polling_method()._initial_response.http_response.headers["Operation-Location"] # type: ignore # pylint: disable=protected-access
- ),
- }
-
- @classmethod
- def from_continuation_token(
- cls, polling_method: PollingMethod[PollingReturnType_co], continuation_token: str, **kwargs: Any
- ) -> "AnalyzeDocumentLROPoller":
- (
- client,
- initial_response,
- deserialization_callback,
- ) = polling_method.from_continuation_token(continuation_token, **kwargs)
-
- return cls(client, initial_response, deserialization_callback, polling_method)
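Per the removed code, this poller's one addition over a plain `LROPoller` was the `details` mapping, which parsed the service-assigned operation id out of the `Operation-Location` header. Callers used it roughly like this (sketch):

```python
# Sketch of the removed poller's extra metadata, per the code above.
poller = client.begin_analyze_document("prebuilt-layout", analyze_request)
operation_id = poller.details["operation_id"]  # parsed from Operation-Location
```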
-
-
-class AnalyzeBatchDocumentsLROPollingMethod(LROBasePolling):
- def finished(self) -> bool:
- """Is this polling finished?
-
- :return: Whether polling is finished or not.
- :rtype: bool
- """
- return _finished(self.status())
-
-
-class DocumentIntelligenceAdministrationClientOperationsMixin(
- GeneratedDIAdminClientOps
-): # pylint: disable=name-too-long
- @distributed_trace
- def begin_build_classifier(
- self, build_request: Union[_models.BuildDocumentClassifierRequest, JSON, IO[bytes]], **kwargs: Any
- ) -> LROPoller[_models.DocumentClassifierDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentClassifierDetails] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._build_classifier_initial( # type: ignore
- build_request=build_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs,
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentClassifierDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: PollingMethod = cast(
- PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
- )
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return LROPoller[_models.DocumentClassifierDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return LROPoller[_models.DocumentClassifierDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace
- def begin_build_document_model(
- self, build_request: Union[_models.BuildDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
- ) -> LROPoller[_models.DocumentModelDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._build_document_model_initial( # type: ignore
- build_request=build_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs,
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentModelDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: PollingMethod = cast(
- PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
- )
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return LROPoller[_models.DocumentModelDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return LROPoller[_models.DocumentModelDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace
- def begin_compose_model(
- self, compose_request: Union[_models.ComposeDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
- ) -> LROPoller[_models.DocumentModelDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._compose_model_initial( # type: ignore
- compose_request=compose_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs,
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentModelDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: PollingMethod = cast(
- PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
- )
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return LROPoller[_models.DocumentModelDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return LROPoller[_models.DocumentModelDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace
- def begin_copy_model_to(
- self, model_id: str, copy_to_request: Union[_models.CopyAuthorization, JSON, IO[bytes]], **kwargs: Any
- ) -> LROPoller[_models.DocumentModelDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._copy_model_to_initial( # type: ignore
- model_id=model_id,
- copy_to_request=copy_to_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs,
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentModelDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: PollingMethod = cast(
- PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
- )
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return LROPoller[_models.DocumentModelDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return LROPoller[_models.DocumentModelDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
-
-class DocumentIntelligenceClientOperationsMixin(GeneratedDIClientOps): # pylint: disable=name-too-long
- @overload
- def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[_models.AnalyzeDocumentRequest] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Default value is None.
- :type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is compatible
- with MutableMapping
- :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[JSON] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Default value is None.
- :type analyze_request: JSON
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is compatible
- with MutableMapping
- :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[IO[bytes]] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Default value is None.
- :type analyze_request: IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is compatible
- with MutableMapping
- :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[Union[_models.AnalyzeDocumentRequest, JSON, IO[bytes]]] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- **kwargs: Any,
- ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Is one of the following types:
- AnalyzeDocumentRequest, JSON, IO[bytes] Default value is None.
- :type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest or JSON or
- IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is compatible
- with MutableMapping
- :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None))
- cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._analyze_document_initial(
- model_id=model_id,
- analyze_request=analyze_request,
- pages=pages,
- locale=locale,
- string_index_type=string_index_type,
- features=features,
- query_fields=query_fields,
- output_content_format=output_content_format,
- output=output,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs,
- )
- raw_result.http_response.read() # type: ignore
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: PollingMethod = cast(
- PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
- )
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AnalyzeDocumentLROPoller[_models.AnalyzeResult].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AnalyzeDocumentLROPoller[_models.AnalyzeResult](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace
- def begin_analyze_batch_documents(
- self,
- model_id: str,
- analyze_batch_request: Optional[Union[_models.AnalyzeBatchDocumentsRequest, JSON, IO[bytes]]] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- **kwargs: Any,
- ) -> LROPoller[_models.AnalyzeBatchResult]:
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- return super().begin_analyze_batch_documents(
- model_id=model_id,
- analyze_batch_request=analyze_batch_request,
- pages=pages,
- locale=locale,
- string_index_type=string_index_type,
- features=features,
- query_fields=query_fields,
- output_content_format=output_content_format,
- output=output,
- polling=AnalyzeBatchDocumentsLROPollingMethod(timeout=lro_delay),
- **kwargs,
- )
-
-
-__all__: List[str] = [
- "DocumentIntelligenceClientOperationsMixin",
- "DocumentIntelligenceAdministrationClientOperationsMixin",
-] # Add all objects you want publicly available to users at this package level
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
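For orientation, a minimal sketch of how the removed begin_analyze_document customization above is typically exercised; the endpoint, key, and file path are illustrative placeholders, not values from this change:

    from azure.core.credentials import AzureKeyCredential
    from azure.ai.documentintelligence import DocumentIntelligenceClient

    client = DocumentIntelligenceClient(
        "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
    )
    # Analyze a local file; the poller wraps the LRO machinery shown above.
    with open("sample.pdf", "rb") as f:
        poller = client.begin_analyze_document(
            "prebuilt-layout", analyze_request=f, content_type="application/octet-stream"
        )
    result = poller.result()  # AnalyzeResult; MutableMapping-compatible per the docstring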
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py
index 2b05dac1b519..f7dd32510333 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py
@@ -6,88 +6,9 @@
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
-from typing import Any, List, Union
-from azure.core.credentials import AzureKeyCredential, TokenCredential
-from ._client import (
- DocumentIntelligenceClient as DIClientGenerated,
- DocumentIntelligenceAdministrationClient as DIAClientGenerated,
-)
-from ._operations._patch import AnalyzeDocumentLROPoller
+from typing import List
-
-class DocumentIntelligenceClient(DIClientGenerated): # pylint: disable=client-accepts-api-version-keyword
- """DocumentIntelligenceClient.
-
- :param endpoint: The Document Intelligence service endpoint. Required.
- :type endpoint: str
- :param credential: Credential needed for the client to connect to Azure. Is either a
- AzureKeyCredential type or a TokenCredential type. Required.
- :type credential: ~azure.core.credentials.AzureKeyCredential or
- ~azure.core.credentials.TokenCredential
- :keyword api_version: The API version to use for this operation. Default value is
- "2024-07-31-preview". Note that overriding this default value may result in unsupported
- behavior.
- :paramtype api_version: str
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- """
-
- def __init__(
- self,
- endpoint: str,
- credential: Union[AzureKeyCredential, TokenCredential],
- **kwargs: Any,
- ) -> None:
- # Patch the default polling interval to be 1s.
- polling_interval = kwargs.pop("polling_interval", 1)
- super().__init__(
- endpoint=endpoint,
- credential=credential,
- polling_interval=polling_interval,
- **kwargs,
- )
-
-
-class DocumentIntelligenceAdministrationClient(
- DIAClientGenerated
-): # pylint: disable=client-accepts-api-version-keyword
- """DocumentIntelligenceAdministrationClient.
-
- :param endpoint: The Document Intelligence service endpoint. Required.
- :type endpoint: str
- :param credential: Credential needed for the client to connect to Azure. Is either a
- AzureKeyCredential type or a TokenCredential type. Required.
- :type credential: ~azure.core.credentials.AzureKeyCredential or
- ~azure.core.credentials.TokenCredential
- :keyword api_version: The API version to use for this operation. Default value is
- "2024-07-31-preview". Note that overriding this default value may result in unsupported
- behavior.
- :paramtype api_version: str
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- """
-
- def __init__(
- self,
- endpoint: str,
- credential: Union[AzureKeyCredential, TokenCredential],
- **kwargs: Any,
- ) -> None:
- # Patch the default polling interval to be 1s.
- polling_interval = kwargs.pop("polling_interval", 1)
- super().__init__(
- endpoint=endpoint,
- credential=credential,
- polling_interval=polling_interval,
- **kwargs,
- )
-
-
-__all__: List[str] = [
- "DocumentIntelligenceClient",
- "DocumentIntelligenceAdministrationClient",
- "AnalyzeDocumentLROPoller",
-] # Add all objects you want publicly available to users at this package level
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
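The deleted subclasses changed only one behavior: defaulting polling_interval to 1 second. A sketch of restoring that per client instance (credential values are placeholders):

    from azure.core.credentials import AzureKeyCredential
    from azure.ai.documentintelligence import DocumentIntelligenceClient

    client = DocumentIntelligenceClient(
        endpoint="https://<resource>.cognitiveservices.azure.com/",
        credential=AzureKeyCredential("<key>"),
        polling_interval=1,  # the default the removed wrapper used to patch in
    )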
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py
index 8139854b97bb..01a226bd7f14 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -24,7 +25,6 @@
#
# --------------------------------------------------------------------------
-# pylint: skip-file
# pyright: reportUnnecessaryTypeIgnoreComment=false
from base64 import b64decode, b64encode
@@ -52,7 +52,6 @@
MutableMapping,
Type,
List,
- Mapping,
)
try:
@@ -91,6 +90,8 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type:
:param data: Input, could be bytes or stream (will be decoded with UTF8) or text
:type data: str or bytes or IO
:param str content_type: The content type.
+ :return: The deserialized data.
+ :rtype: object
"""
if hasattr(data, "read"):
# Assume a stream
@@ -112,7 +113,7 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type:
try:
return json.loads(data_as_str)
except ValueError as err:
- raise DeserializationError("JSON is invalid: {}".format(err), err)
+ raise DeserializationError("JSON is invalid: {}".format(err), err) from err
elif "xml" in (content_type or []):
try:
@@ -155,6 +156,11 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]],
Use bytes and headers to NOT use any requests/aiohttp or whatever
specific implementation.
Headers will be tested for "content-type"
+
+ :param bytes body_bytes: The body of the response.
+ :param dict headers: The headers of the response.
+ :returns: The deserialized data.
+ :rtype: object
"""
# Try to use content-type from headers if available
content_type = None
@@ -184,15 +190,30 @@ class UTC(datetime.tzinfo):
"""Time Zone info for handling UTC"""
def utcoffset(self, dt):
- """UTF offset for UTC is 0."""
+ """UTF offset for UTC is 0.
+
+ :param datetime.datetime dt: The datetime
+ :returns: The offset
+ :rtype: datetime.timedelta
+ """
return datetime.timedelta(0)
def tzname(self, dt):
- """Timestamp representation."""
+ """Timestamp representation.
+
+ :param datetime.datetime dt: The datetime
+ :returns: The timestamp representation
+ :rtype: str
+ """
return "Z"
def dst(self, dt):
- """No daylight saving for UTC."""
+ """No daylight saving for UTC.
+
+ :param datetime.datetime dt: The datetime
+ :returns: The daylight saving time
+ :rtype: datetime.timedelta
+ """
return datetime.timedelta(hours=1)
@@ -235,24 +256,26 @@ def __getinitargs__(self):
_FLATTEN = re.compile(r"(? None:
self.additional_properties: Optional[Dict[str, Any]] = {}
- for k in kwargs:
+ for k in kwargs: # pylint: disable=consider-using-dict-items
if k not in self._attribute_map:
_LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
elif k in self._validation and self._validation[k].get("readonly", False):
@@ -300,13 +330,23 @@ def __init__(self, **kwargs: Any) -> None:
setattr(self, k, kwargs[k])
def __eq__(self, other: Any) -> bool:
- """Compare objects by comparing all attributes."""
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are equal
+ :rtype: bool
+ """
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: Any) -> bool:
- """Compare objects by comparing all attributes."""
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are not equal
+ :rtype: bool
+ """
return not self.__eq__(other)
def __str__(self) -> str:
@@ -326,7 +366,11 @@ def is_xml_model(cls) -> bool:
@classmethod
def _create_xml_node(cls):
- """Create XML node."""
+ """Create XML node.
+
+ :returns: The XML node
+ :rtype: xml.etree.ElementTree.Element
+ """
try:
xml_map = cls._xml_map # type: ignore
except AttributeError:
@@ -346,7 +390,9 @@ def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
:rtype: dict
"""
serializer = Serializer(self._infer_class_models())
- return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) # type: ignore
+ return serializer._serialize( # type: ignore # pylint: disable=protected-access
+ self, keep_readonly=keep_readonly, **kwargs
+ )
def as_dict(
self,
@@ -380,12 +426,15 @@ def my_key_transformer(key, attr_desc, value):
If you want XML serialization, you can pass the kwargs is_xml=True.
+ :param bool keep_readonly: If you want to serialize the readonly attributes
:param function key_transformer: A key transformer function.
:returns: A dict JSON compatible object
:rtype: dict
"""
serializer = Serializer(self._infer_class_models())
- return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore
+ return serializer._serialize( # type: ignore # pylint: disable=protected-access
+ self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+ )
@classmethod
def _infer_class_models(cls):
@@ -395,7 +444,7 @@ def _infer_class_models(cls):
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
if cls.__name__ not in client_models:
raise ValueError("Not Autorest generated code")
- except Exception:
+ except Exception: # pylint: disable=broad-exception-caught
# Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
client_models = {cls.__name__: cls}
return client_models
@@ -408,6 +457,7 @@ def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = N
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
+ :rtype: ModelType
"""
deserializer = Deserializer(cls._infer_class_models())
return deserializer(cls.__name__, data, content_type=content_type) # type: ignore
@@ -426,9 +476,11 @@ def from_dict(
and last_rest_key_case_insensitive_extractor)
:param dict data: A dict using RestAPI structure
+ :param function key_extractors: A key extractor function.
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
+ :rtype: ModelType
"""
deserializer = Deserializer(cls._infer_class_models())
deserializer.key_extractors = ( # type: ignore
@@ -448,7 +500,7 @@ def _flatten_subtype(cls, key, objects):
return {}
result = dict(cls._subtype_map[key])
for valuetype in cls._subtype_map[key].values():
- result.update(objects[valuetype]._flatten_subtype(key, objects))
+ result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access
return result
@classmethod
@@ -456,6 +508,11 @@ def _classify(cls, response, objects):
"""Check the class _subtype_map for any child classes.
We want to ignore any inherited _subtype_maps.
Remove the polymorphic key from the initial data.
+
+ :param dict response: The initial data
+ :param dict objects: The class objects
+ :returns: The class to be used
+ :rtype: class
"""
for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
subtype_value = None
@@ -501,11 +558,13 @@ def _decode_attribute_map_key(key):
inside the received data.
:param str key: A key string from the generated code
+ :returns: The decoded key
+ :rtype: str
"""
return key.replace("\\.", ".")
-class Serializer(object):
+class Serializer(object): # pylint: disable=too-many-public-methods
"""Request object model serializer."""
basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
@@ -560,13 +619,16 @@ def __init__(self, classes: Optional[Mapping[str, type]] = None):
self.key_transformer = full_restapi_key_transformer
self.client_side_validation = True
- def _serialize(self, target_obj, data_type=None, **kwargs):
+ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+ self, target_obj, data_type=None, **kwargs
+ ):
"""Serialize data into a string according to type.
- :param target_obj: The data to be serialized.
+ :param object target_obj: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str, dict
:raises: SerializationError if serialization fails.
+ :returns: The serialized data.
"""
key_transformer = kwargs.get("key_transformer", self.key_transformer)
keep_readonly = kwargs.get("keep_readonly", False)
@@ -592,12 +654,14 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
serialized = {}
if is_xml_model_serialization:
- serialized = target_obj._create_xml_node()
+ serialized = target_obj._create_xml_node() # pylint: disable=protected-access
try:
- attributes = target_obj._attribute_map
+ attributes = target_obj._attribute_map # pylint: disable=protected-access
for attr, attr_desc in attributes.items():
attr_name = attr
- if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False):
+ if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access
+ attr_name, {}
+ ).get("readonly", False):
continue
if attr_name == "additional_properties" and attr_desc["key"] == "":
@@ -633,7 +697,8 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
if isinstance(new_attr, list):
serialized.extend(new_attr) # type: ignore
elif isinstance(new_attr, ET.Element):
- # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces.
+ # If the down XML has no XML/Name,
+ # we MUST replace the tag with the local tag. But keeping the namespaces.
if "name" not in getattr(orig_attr, "_xml_map", {}):
splitted_tag = new_attr.tag.split("}")
if len(splitted_tag) == 2: # Namespace
@@ -664,17 +729,17 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
except (AttributeError, KeyError, TypeError) as err:
msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
raise SerializationError(msg) from err
- else:
- return serialized
+ return serialized
def body(self, data, data_type, **kwargs):
"""Serialize data intended for a request body.
- :param data: The data to be serialized.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: dict
:raises: SerializationError if serialization fails.
:raises: ValueError if data is None
+ :returns: The serialized request body
"""
# Just in case this is a dict
@@ -703,7 +768,7 @@ def body(self, data, data_type, **kwargs):
attribute_key_case_insensitive_extractor,
last_rest_key_case_insensitive_extractor,
]
- data = deserializer._deserialize(data_type, data)
+ data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access
except DeserializationError as err:
raise SerializationError("Unable to build a model: " + str(err)) from err
@@ -712,9 +777,11 @@ def body(self, data, data_type, **kwargs):
def url(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL path.
- :param data: The data to be serialized.
+ :param str name: The name of the URL path parameter.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
+ :returns: The serialized URL path
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
@@ -728,21 +795,20 @@ def url(self, name, data, data_type, **kwargs):
output = output.replace("{", quote("{")).replace("}", quote("}"))
else:
output = quote(str(output), safe="")
- except SerializationError:
- raise TypeError("{} must be type {}.".format(name, data_type))
- else:
- return output
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return output
def query(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL query.
- :param data: The data to be serialized.
+ :param str name: The name of the query parameter.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
- :keyword bool skip_quote: Whether to skip quote the serialized result.
- Defaults to False.
:rtype: str, list
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
+ :returns: The serialized query parameter
"""
try:
# Treat the list aside, since we don't want to encode the div separator
@@ -759,19 +825,20 @@ def query(self, name, data, data_type, **kwargs):
output = str(output)
else:
output = quote(str(output), safe="")
- except SerializationError:
- raise TypeError("{} must be type {}.".format(name, data_type))
- else:
- return str(output)
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return str(output)
def header(self, name, data, data_type, **kwargs):
"""Serialize data intended for a request header.
- :param data: The data to be serialized.
+ :param str name: The name of the header.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
+ :returns: The serialized header
"""
try:
if data_type in ["[str]"]:
@@ -780,21 +847,20 @@ def header(self, name, data, data_type, **kwargs):
output = self.serialize_data(data, data_type, **kwargs)
if data_type == "bool":
output = json.dumps(output)
- except SerializationError:
- raise TypeError("{} must be type {}.".format(name, data_type))
- else:
- return str(output)
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return str(output)
def serialize_data(self, data, data_type, **kwargs):
"""Serialize generic data according to supplied data type.
- :param data: The data to be serialized.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
- :param bool required: Whether it's essential that the data not be
- empty or None
:raises: AttributeError if required data is None.
:raises: ValueError if data is None
:raises: SerializationError if serialization fails.
+ :returns: The serialized data.
+ :rtype: str, int, float, bool, dict, list
"""
if data is None:
raise ValueError("No value for given attribute")
@@ -805,7 +871,7 @@ def serialize_data(self, data, data_type, **kwargs):
if data_type in self.basic_types.values():
return self.serialize_basic(data, data_type, **kwargs)
- elif data_type in self.serialize_type:
+ if data_type in self.serialize_type:
return self.serialize_type[data_type](data, **kwargs)
# If dependencies is empty, try with current data class
@@ -821,11 +887,10 @@ def serialize_data(self, data, data_type, **kwargs):
except (ValueError, TypeError) as err:
msg = "Unable to serialize value: {!r} as type: {!r}."
raise SerializationError(msg.format(data, data_type)) from err
- else:
- return self._serialize(data, **kwargs)
+ return self._serialize(data, **kwargs)
@classmethod
- def _get_custom_serializers(cls, data_type, **kwargs):
+ def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements
custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
if custom_serializer:
return custom_serializer
@@ -841,23 +906,26 @@ def serialize_basic(cls, data, data_type, **kwargs):
- basic_types_serializers dict[str, callable] : If set, use the callable as serializer
- is_xml bool : If set, use xml_basic_types_serializers
- :param data: Object to be serialized.
+ :param obj data: Object to be serialized.
:param str data_type: Type of object in the iterable.
+ :rtype: str, int, float, bool
+ :return: serialized object
"""
custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
if custom_serializer:
return custom_serializer(data)
if data_type == "str":
return cls.serialize_unicode(data)
- return eval(data_type)(data) # nosec
+ return eval(data_type)(data) # nosec # pylint: disable=eval-used
@classmethod
def serialize_unicode(cls, data):
"""Special handling for serializing unicode strings in Py2.
Encode to UTF-8 if unicode, otherwise handle as a str.
- :param data: Object to be serialized.
+ :param str data: Object to be serialized.
:rtype: str
+ :return: serialized object
"""
try: # If I received an enum, return its value
return data.value
@@ -871,8 +939,7 @@ def serialize_unicode(cls, data):
return data
except NameError:
return str(data)
- else:
- return str(data)
+ return str(data)
def serialize_iter(self, data, iter_type, div=None, **kwargs):
"""Serialize iterable.
@@ -882,15 +949,13 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs):
serialization_ctxt['type'] should be same as data_type.
- is_xml bool : If set, serialize as XML
- :param list attr: Object to be serialized.
+ :param list data: Object to be serialized.
:param str iter_type: Type of object in the iterable.
- :param bool required: Whether the objects in the iterable must
- not be None or empty.
:param str div: If set, this str will be used to combine the elements
in the iterable into a combined string. Default is 'None'.
- :keyword bool do_quote: Whether to quote the serialized result of each iterable element.
Defaults to False.
:rtype: list, str
+ :return: serialized iterable
"""
if isinstance(data, str):
raise SerializationError("Refuse str type as a valid iter type.")
@@ -945,9 +1010,8 @@ def serialize_dict(self, attr, dict_type, **kwargs):
:param dict attr: Object to be serialized.
:param str dict_type: Type of object in the dictionary.
- :param bool required: Whether the objects in the dictionary must
- not be None or empty.
:rtype: dict
+ :return: serialized dictionary
"""
serialization_ctxt = kwargs.get("serialization_ctxt", {})
serialized = {}
@@ -971,7 +1035,7 @@ def serialize_dict(self, attr, dict_type, **kwargs):
return serialized
- def serialize_object(self, attr, **kwargs):
+ def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
"""Serialize a generic object.
This will be handled as a dictionary. If object passed in is not
a basic type (str, int, float, dict, list) it will simply be
@@ -979,6 +1043,7 @@ def serialize_object(self, attr, **kwargs):
:param dict attr: Object to be serialized.
:rtype: dict or str
+ :return: serialized object
"""
if attr is None:
return None
@@ -1003,7 +1068,7 @@ def serialize_object(self, attr, **kwargs):
return self.serialize_decimal(attr)
# If it's a model or I know this dependency, serialize as a Model
- elif obj_type in self.dependencies.values() or isinstance(attr, Model):
+ if obj_type in self.dependencies.values() or isinstance(attr, Model):
return self._serialize(attr)
if obj_type == dict:
@@ -1034,56 +1099,61 @@ def serialize_enum(attr, enum_obj=None):
try:
enum_obj(result) # type: ignore
return result
- except ValueError:
+ except ValueError as exc:
for enum_value in enum_obj: # type: ignore
if enum_value.value.lower() == str(attr).lower():
return enum_value.value
error = "{!r} is not valid value for enum {!r}"
- raise SerializationError(error.format(attr, enum_obj))
+ raise SerializationError(error.format(attr, enum_obj)) from exc
@staticmethod
- def serialize_bytearray(attr, **kwargs):
+ def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize bytearray into base-64 string.
- :param attr: Object to be serialized.
+ :param str attr: Object to be serialized.
:rtype: str
+ :return: serialized base64
"""
return b64encode(attr).decode()
@staticmethod
- def serialize_base64(attr, **kwargs):
+ def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize str into base-64 string.
- :param attr: Object to be serialized.
+ :param str attr: Object to be serialized.
:rtype: str
+ :return: serialized base64
"""
encoded = b64encode(attr).decode("ascii")
return encoded.strip("=").replace("+", "-").replace("/", "_")
@staticmethod
- def serialize_decimal(attr, **kwargs):
+ def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Decimal object to float.
- :param attr: Object to be serialized.
+ :param decimal attr: Object to be serialized.
:rtype: float
+ :return: serialized decimal
"""
return float(attr)
@staticmethod
- def serialize_long(attr, **kwargs):
+ def serialize_long(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize long (Py2) or int (Py3).
- :param attr: Object to be serialized.
+ :param int attr: Object to be serialized.
:rtype: int/long
+ :return: serialized long
"""
return _long_type(attr)
@staticmethod
- def serialize_date(attr, **kwargs):
+ def serialize_date(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Date object into ISO-8601 formatted string.
:param Date attr: Object to be serialized.
:rtype: str
+ :return: serialized date
"""
if isinstance(attr, str):
attr = isodate.parse_date(attr)
@@ -1091,11 +1161,12 @@ def serialize_date(attr, **kwargs):
return t
@staticmethod
- def serialize_time(attr, **kwargs):
+ def serialize_time(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Time object into ISO-8601 formatted string.
:param datetime.time attr: Object to be serialized.
:rtype: str
+ :return: serialized time
"""
if isinstance(attr, str):
attr = isodate.parse_time(attr)
@@ -1105,30 +1176,32 @@ def serialize_time(attr, **kwargs):
return t
@staticmethod
- def serialize_duration(attr, **kwargs):
+ def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
+ :return: serialized duration
"""
if isinstance(attr, str):
attr = isodate.parse_duration(attr)
return isodate.duration_isoformat(attr)
@staticmethod
- def serialize_rfc(attr, **kwargs):
+ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Datetime object into RFC-1123 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: TypeError if format invalid.
+ :return: serialized rfc
"""
try:
if not attr.tzinfo:
_LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
utc = attr.utctimetuple()
- except AttributeError:
- raise TypeError("RFC1123 object must be valid Datetime object.")
+ except AttributeError as exc:
+ raise TypeError("RFC1123 object must be valid Datetime object.") from exc
return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
Serializer.days[utc.tm_wday],
@@ -1141,12 +1214,13 @@ def serialize_rfc(attr, **kwargs):
)
@staticmethod
- def serialize_iso(attr, **kwargs):
+ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Datetime object into ISO-8601 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: SerializationError if format invalid.
+ :return: serialized iso
"""
if isinstance(attr, str):
attr = isodate.parse_datetime(attr)
@@ -1172,13 +1246,14 @@ def serialize_iso(attr, **kwargs):
raise TypeError(msg) from err
@staticmethod
- def serialize_unix(attr, **kwargs):
+ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Datetime object into IntTime format.
This is represented as seconds.
:param Datetime attr: Object to be serialized.
:rtype: int
:raises: SerializationError if format invalid
+        :return: serialized unix
"""
if isinstance(attr, int):
return attr
@@ -1186,11 +1261,11 @@ def serialize_unix(attr, **kwargs):
if not attr.tzinfo:
_LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
return int(calendar.timegm(attr.utctimetuple()))
- except AttributeError:
- raise TypeError("Unix time object must be valid Datetime object.")
+ except AttributeError as exc:
+ raise TypeError("Unix time object must be valid Datetime object.") from exc
-def rest_key_extractor(attr, attr_desc, data):
+def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
key = attr_desc["key"]
working_data = data
@@ -1211,7 +1286,9 @@ def rest_key_extractor(attr, attr_desc, data):
return working_data.get(key)
-def rest_key_case_insensitive_extractor(attr, attr_desc, data):
+def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements
+ attr, attr_desc, data
+):
key = attr_desc["key"]
working_data = data
@@ -1232,17 +1309,29 @@ def rest_key_case_insensitive_extractor(attr, attr_desc, data):
return attribute_key_case_insensitive_extractor(key, None, working_data)
-def last_rest_key_extractor(attr, attr_desc, data):
- """Extract the attribute in "data" based on the last part of the JSON path key."""
+def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" based on the last part of the JSON path key.
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
key = attr_desc["key"]
dict_keys = _FLATTEN.split(key)
return attribute_key_extractor(dict_keys[-1], None, data)
-def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
"""Extract the attribute in "data" based on the last part of the JSON path key.
This is the case insensitive version of "last_rest_key_extractor"
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
"""
key = attr_desc["key"]
dict_keys = _FLATTEN.split(key)
@@ -1279,7 +1368,7 @@ def _extract_name_from_internal_type(internal_type):
return xml_name
-def xml_key_extractor(attr, attr_desc, data):
+def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements
if isinstance(data, dict):
return None
@@ -1331,22 +1420,21 @@ def xml_key_extractor(attr, attr_desc, data):
if is_iter_type:
if is_wrapped:
return None # is_wrapped no node, we want None
- else:
- return [] # not wrapped, assume empty list
+ return [] # not wrapped, assume empty list
return None # Assume it's not there, maybe an optional node.
# If is_iter_type and not wrapped, return all found children
if is_iter_type:
if not is_wrapped:
return children
- else: # Iter and wrapped, should have found one node only (the wrap one)
- if len(children) != 1:
- raise DeserializationError(
- "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
- xml_name
- )
+ # Iter and wrapped, should have found one node only (the wrap one)
+ if len(children) != 1:
+ raise DeserializationError(
+ "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long
+ xml_name
)
- return list(children[0]) # Might be empty list and that's ok.
+ )
+ return list(children[0]) # Might be empty list and that's ok.
# Here it's not a itertype, we should have found one element only or empty
if len(children) > 1:
@@ -1363,7 +1451,7 @@ class Deserializer(object):
basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
- valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+ valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
def __init__(self, classes: Optional[Mapping[str, type]] = None):
self.deserialize_type = {
@@ -1403,11 +1491,12 @@ def __call__(self, target_obj, response_data, content_type=None):
:param str content_type: Swagger "produces" if available.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
+ :rtype: object
"""
data = self._unpack_content(response_data, content_type)
return self._deserialize(target_obj, data)
- def _deserialize(self, target_obj, data):
+ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements
"""Call the deserializer on a model.
Data needs to be already deserialized as JSON or XML ElementTree
@@ -1416,12 +1505,13 @@ def _deserialize(self, target_obj, data):
:param object data: Object to deserialize.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
+ :rtype: object
"""
# This is already a model, go recursive just in case
if hasattr(data, "_attribute_map"):
constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
try:
- for attr, mapconfig in data._attribute_map.items():
+ for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access
if attr in constants:
continue
value = getattr(data, attr)
@@ -1440,13 +1530,13 @@ def _deserialize(self, target_obj, data):
if isinstance(response, str):
return self.deserialize_data(data, response)
- elif isinstance(response, type) and issubclass(response, Enum):
+ if isinstance(response, type) and issubclass(response, Enum):
return self.deserialize_enum(data, response)
if data is None or data is CoreNull:
return data
try:
- attributes = response._attribute_map # type: ignore
+ attributes = response._attribute_map # type: ignore # pylint: disable=protected-access
d_attrs = {}
for attr, attr_desc in attributes.items():
# Check empty string. If it's not empty, someone has a real "additionalProperties"...
@@ -1476,9 +1566,8 @@ def _deserialize(self, target_obj, data):
except (AttributeError, TypeError, KeyError) as err:
msg = "Unable to deserialize to object: " + class_name # type: ignore
raise DeserializationError(msg) from err
- else:
- additional_properties = self._build_additional_properties(attributes, data)
- return self._instantiate_model(response, d_attrs, additional_properties)
+ additional_properties = self._build_additional_properties(attributes, data)
+ return self._instantiate_model(response, d_attrs, additional_properties)
def _build_additional_properties(self, attribute_map, data):
if not self.additional_properties_detection:
@@ -1505,6 +1594,8 @@ def _classify_target(self, target, data):
:param str target: The target object type to deserialize to.
:param str/dict data: The response data to deserialize.
+ :return: The classified target object and its class name.
+ :rtype: tuple
"""
if target is None:
return None, None
@@ -1516,7 +1607,7 @@ def _classify_target(self, target, data):
return target, target
try:
- target = target._classify(data, self.dependencies) # type: ignore
+ target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access
except AttributeError:
pass # Target is not a Model, no classify
return target, target.__class__.__name__ # type: ignore
@@ -1531,10 +1622,12 @@ def failsafe_deserialize(self, target_obj, data, content_type=None):
:param str target_obj: The target object type to deserialize to.
:param str/dict data: The response data to deserialize.
:param str content_type: Swagger "produces" if available.
+ :return: Deserialized object.
+ :rtype: object
"""
try:
return self(target_obj, data, content_type=content_type)
- except:
+ except: # pylint: disable=bare-except
_LOGGER.debug(
"Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
)
@@ -1552,10 +1645,12 @@ def _unpack_content(raw_data, content_type=None):
If raw_data is something else, bypass all logic and return it directly.
- :param raw_data: Data to be processed.
- :param content_type: How to parse if raw_data is a string/bytes.
+ :param obj raw_data: Data to be processed.
+ :param str content_type: How to parse if raw_data is a string/bytes.
:raises JSONDecodeError: If JSON is requested and parsing is impossible.
:raises UnicodeDecodeError: If bytes is not UTF8
+ :rtype: object
+ :return: Unpacked content.
"""
# Assume this is enough to detect a Pipeline Response without importing it
context = getattr(raw_data, "context", {})
@@ -1579,14 +1674,21 @@ def _unpack_content(raw_data, content_type=None):
def _instantiate_model(self, response, attrs, additional_properties=None):
"""Instantiate a response model passing in deserialized args.
- :param response: The response model class.
- :param d_attrs: The deserialized response attributes.
+ :param Response response: The response model class.
+ :param dict attrs: The deserialized response attributes.
+ :param dict additional_properties: Additional properties to be set.
+ :rtype: Response
+ :return: The instantiated response model.
"""
if callable(response):
subtype = getattr(response, "_subtype_map", {})
try:
- readonly = [k for k, v in response._validation.items() if v.get("readonly")]
- const = [k for k, v in response._validation.items() if v.get("constant")]
+ readonly = [
+ k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access
+ ]
+ const = [
+ k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access
+ ]
kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
response_obj = response(**kwargs)
for attr in readonly:
@@ -1596,7 +1698,7 @@ def _instantiate_model(self, response, attrs, additional_properties=None):
return response_obj
except TypeError as err:
msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore
- raise DeserializationError(msg + str(err))
+ raise DeserializationError(msg + str(err)) from err
else:
try:
for attr, value in attrs.items():
@@ -1605,15 +1707,16 @@ def _instantiate_model(self, response, attrs, additional_properties=None):
except Exception as exp:
msg = "Unable to populate response model. "
msg += "Type: {}, Error: {}".format(type(response), exp)
- raise DeserializationError(msg)
+ raise DeserializationError(msg) from exp
- def deserialize_data(self, data, data_type):
+ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements
"""Process data for deserialization according to data type.
:param str data: The response string to be deserialized.
:param str data_type: The type to deserialize to.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
+ :rtype: object
"""
if data is None:
return data
@@ -1627,7 +1730,11 @@ def deserialize_data(self, data, data_type):
if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
return data
- is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"]
+ is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment
+ "object",
+ "[]",
+ r"{}",
+ ]
if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
return None
data_val = self.deserialize_type[data_type](data)
@@ -1647,14 +1754,14 @@ def deserialize_data(self, data, data_type):
msg = "Unable to deserialize response data."
msg += " Data: {}, {}".format(data, data_type)
raise DeserializationError(msg) from err
- else:
- return self._deserialize(obj_type, data)
+ return self._deserialize(obj_type, data)
def deserialize_iter(self, attr, iter_type):
"""Deserialize an iterable.
:param list attr: Iterable to be deserialized.
:param str iter_type: The type of object in the iterable.
+ :return: Deserialized iterable.
:rtype: list
"""
if attr is None:
@@ -1671,6 +1778,7 @@ def deserialize_dict(self, attr, dict_type):
:param dict/list attr: Dictionary to be deserialized. Also accepts
a list of key, value pairs.
:param str dict_type: The object type of the items in the dictionary.
+ :return: Deserialized dictionary.
:rtype: dict
"""
if isinstance(attr, list):
@@ -1681,11 +1789,12 @@ def deserialize_dict(self, attr, dict_type):
attr = {el.tag: el.text for el in attr}
return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
- def deserialize_object(self, attr, **kwargs):
+ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
"""Deserialize a generic object.
This will be handled as a dictionary.
:param dict attr: Dictionary to be deserialized.
+ :return: Deserialized object.
:rtype: dict
:raises: TypeError if non-builtin datatype encountered.
"""
@@ -1720,11 +1829,10 @@ def deserialize_object(self, attr, **kwargs):
pass
return deserialized
- else:
- error = "Cannot deserialize generic object with type: "
- raise TypeError(error + str(obj_type))
+ error = "Cannot deserialize generic object with type: "
+ raise TypeError(error + str(obj_type))
- def deserialize_basic(self, attr, data_type):
+ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements
"""Deserialize basic builtin data type from string.
Will attempt to convert to str, int, float and bool.
This function will also accept '1', '0', 'true' and 'false' as
@@ -1732,6 +1840,7 @@ def deserialize_basic(self, attr, data_type):
:param str attr: response string to be deserialized.
:param str data_type: deserialization data type.
+ :return: Deserialized basic type.
:rtype: str, int, float or bool
:raises: TypeError if string format is not valid.
"""
@@ -1743,24 +1852,23 @@ def deserialize_basic(self, attr, data_type):
if data_type == "str":
# None or '', node is empty string.
return ""
- else:
- # None or '', node with a strong type is None.
- # Don't try to model "empty bool" or "empty int"
- return None
+ # None or '', node with a strong type is None.
+ # Don't try to model "empty bool" or "empty int"
+ return None
if data_type == "bool":
if attr in [True, False, 1, 0]:
return bool(attr)
- elif isinstance(attr, str):
+ if isinstance(attr, str):
if attr.lower() in ["true", "1"]:
return True
- elif attr.lower() in ["false", "0"]:
+ if attr.lower() in ["false", "0"]:
return False
raise TypeError("Invalid boolean value: {}".format(attr))
if data_type == "str":
return self.deserialize_unicode(attr)
- return eval(data_type)(attr) # nosec
+ return eval(data_type)(attr) # nosec # pylint: disable=eval-used
@staticmethod
def deserialize_unicode(data):
@@ -1768,6 +1876,7 @@ def deserialize_unicode(data):
as a string.
:param str data: response string to be deserialized.
+ :return: Deserialized string.
:rtype: str or unicode
"""
# We might be here because we have an enum modeled as string,
@@ -1781,8 +1890,7 @@ def deserialize_unicode(data):
return data
except NameError:
return str(data)
- else:
- return str(data)
+ return str(data)
@staticmethod
def deserialize_enum(data, enum_obj):
@@ -1794,6 +1902,7 @@ def deserialize_enum(data, enum_obj):
:param str data: Response string to be deserialized. If this value is
None or invalid it will be returned as-is.
:param Enum enum_obj: Enum object to deserialize to.
+ :return: Deserialized enum object.
:rtype: Enum
"""
if isinstance(data, enum_obj) or data is None:
@@ -1804,9 +1913,9 @@ def deserialize_enum(data, enum_obj):
# Workaround. We might consider remove it in the future.
try:
return list(enum_obj.__members__.values())[data]
- except IndexError:
+ except IndexError as exc:
error = "{!r} is not a valid index for enum {!r}"
- raise DeserializationError(error.format(data, enum_obj))
+ raise DeserializationError(error.format(data, enum_obj)) from exc
try:
return enum_obj(str(data))
except ValueError:
@@ -1822,6 +1931,7 @@ def deserialize_bytearray(attr):
"""Deserialize string into bytearray.
:param str attr: response string to be deserialized.
+ :return: Deserialized bytearray
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
@@ -1834,6 +1944,7 @@ def deserialize_base64(attr):
"""Deserialize base64 encoded string into string.
:param str attr: response string to be deserialized.
+ :return: Deserialized base64 string
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
@@ -1849,8 +1960,9 @@ def deserialize_decimal(attr):
"""Deserialize string into Decimal object.
:param str attr: response string to be deserialized.
- :rtype: Decimal
+ :return: Deserialized decimal
:raises: DeserializationError if string format invalid.
+ :rtype: decimal
"""
if isinstance(attr, ET.Element):
attr = attr.text
@@ -1865,6 +1977,7 @@ def deserialize_long(attr):
"""Deserialize string into long (Py2) or int (Py3).
:param str attr: response string to be deserialized.
+ :return: Deserialized int
:rtype: long or int
:raises: ValueError if string format invalid.
"""
@@ -1877,6 +1990,7 @@ def deserialize_duration(attr):
"""Deserialize ISO-8601 formatted string into TimeDelta object.
:param str attr: response string to be deserialized.
+ :return: Deserialized duration
:rtype: TimeDelta
:raises: DeserializationError if string format invalid.
"""
@@ -1887,14 +2001,14 @@ def deserialize_duration(attr):
except (ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize duration object."
raise DeserializationError(msg) from err
- else:
- return duration
+ return duration
@staticmethod
def deserialize_date(attr):
"""Deserialize ISO-8601 formatted string into Date object.
:param str attr: response string to be deserialized.
+ :return: Deserialized date
:rtype: Date
:raises: DeserializationError if string format invalid.
"""
@@ -1910,6 +2024,7 @@ def deserialize_time(attr):
"""Deserialize ISO-8601 formatted string into time object.
:param str attr: response string to be deserialized.
+ :return: Deserialized time
:rtype: datetime.time
:raises: DeserializationError if string format invalid.
"""
@@ -1924,6 +2039,7 @@ def deserialize_rfc(attr):
"""Deserialize RFC-1123 formatted string into Datetime object.
:param str attr: response string to be deserialized.
+ :return: Deserialized RFC datetime
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
@@ -1939,14 +2055,14 @@ def deserialize_rfc(attr):
except ValueError as err:
msg = "Cannot deserialize to rfc datetime object."
raise DeserializationError(msg) from err
- else:
- return date_obj
+ return date_obj
@staticmethod
def deserialize_iso(attr):
"""Deserialize ISO-8601 formatted string into Datetime object.
:param str attr: response string to be deserialized.
+ :return: Deserialized ISO datetime
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
@@ -1976,8 +2092,7 @@ def deserialize_iso(attr):
except (ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize datetime object."
raise DeserializationError(msg) from err
- else:
- return date_obj
+ return date_obj
@staticmethod
def deserialize_unix(attr):
@@ -1985,6 +2100,7 @@ def deserialize_unix(attr):
This is represented as seconds.
:param int attr: Object to be serialized.
+ :return: Deserialized datetime
:rtype: Datetime
:raises: DeserializationError if format invalid
"""
@@ -1996,5 +2112,4 @@ def deserialize_unix(attr):
except ValueError as err:
msg = "Cannot deserialize to unix datetime object."
raise DeserializationError(msg) from err
- else:
- return date_obj
+ return date_obj
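Several of the deserialize_* helpers touched above are static methods, so they can be sanity-checked in isolation. A hedged sketch against the private _serialization module (internal API, subject to change between code generations):

    from azure.ai.documentintelligence._serialization import Deserializer

    Deserializer.deserialize_iso("2024-07-31T12:00:00Z")  # tz-aware datetime
    Deserializer.deserialize_unix(1722427200)             # datetime from epoch seconds
    Deserializer.deserialize_duration("PT1H30M")          # isodate duration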
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py
deleted file mode 100644
index 752b2822f9d3..000000000000
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-import functools
-
-
-def api_version_validation(**kwargs):
- params_added_on = kwargs.pop("params_added_on", {})
- method_added_on = kwargs.pop("method_added_on", "")
-
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- try:
- # this assumes the client has an _api_version attribute
- client = args[0]
- client_api_version = client._config.api_version # pylint: disable=protected-access
- except AttributeError:
- return func(*args, **kwargs)
-
- if method_added_on > client_api_version:
- raise ValueError(
- f"'{func.__name__}' is not available in API version "
- f"{client_api_version}. Pass service API version {method_added_on} or newer to your client."
- )
-
- unsupported = {
- parameter: api_version
- for api_version, parameters in params_added_on.items()
- for parameter in parameters
- if parameter in kwargs and api_version > client_api_version
- }
- if unsupported:
- raise ValueError(
- "".join(
- [
- f"'{param}' is not available in API version {client_api_version}. "
- f"Use service API version {version} or newer.\n"
- for param, version in unsupported.items()
- ]
- )
- )
- return func(*args, **kwargs)
-
- return wrapper
-
- return decorator
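For reference, a hedged sketch of how the deleted api_version_validation decorator was applied to an operation; the method and parameter names here are illustrative, not taken from this PR:

    @api_version_validation(
        method_added_on="2024-07-31-preview",
        params_added_on={"2024-07-31-preview": ["output"]},
    )
    def begin_analyze_document(self, model_id, **kwargs):
        ...

With a client pinned to an older api_version, calling the method raises ValueError, and passing a newer-only keyword such as output raises ValueError naming the service version that introduced it.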
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py
index c7d155d924dd..bbcd28b4aa67 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py
@@ -6,4 +6,4 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-VERSION = "1.0.0b5"
+VERSION = "1.0.0b2"
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/__init__.py
index 737422fc4e69..63adf2c71797 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/__init__.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/__init__.py
@@ -6,18 +6,20 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from ._patch import DocumentIntelligenceClient
-from ._patch import DocumentIntelligenceAdministrationClient
+from ._client import DocumentIntelligenceClient
+from ._client import DocumentIntelligenceAdministrationClient
-
-from ._patch import AsyncAnalyzeDocumentLROPoller
+try:
+ from ._patch import __all__ as _patch_all
+ from ._patch import * # pylint: disable=unused-wildcard-import
+except ImportError:
+ _patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
- "AsyncAnalyzeDocumentLROPoller",
"DocumentIntelligenceClient",
"DocumentIntelligenceAdministrationClient",
]
-
+__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
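The restored try/except wildcard import means async customizations now flow through _patch.py's __all__. A sketch of the hook shape this __init__ expects; re-exporting AsyncAnalyzeDocumentLROPoller is shown only as an example:

    # aio/_patch.py
    from typing import List

    __all__: List[str] = []  # e.g. ["AsyncAnalyzeDocumentLROPoller"] to re-surface it

    def patch_sdk():
        """Intentionally left empty; runs after the wildcard import for late patching."""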
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py
index 36334aa2ea34..057c6c92037f 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py
@@ -6,15 +6,16 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from ._patch import DocumentIntelligenceClientOperationsMixin
-from ._patch import DocumentIntelligenceAdministrationClientOperationsMixin
-
+from ._operations import DocumentIntelligenceClientOperationsMixin
+from ._operations import DocumentIntelligenceAdministrationClientOperationsMixin
+from ._patch import __all__ as _patch_all
+from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"DocumentIntelligenceClientOperationsMixin",
"DocumentIntelligenceAdministrationClientOperationsMixin",
]
-
+__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py
index d77704638d9a..f4d7220f6a38 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py
@@ -97,7 +97,7 @@ async def _analyze_document_initial(
output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
**kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -190,7 +190,7 @@ async def begin_analyze_document(
:type model_id: str
:param analyze_request: Analyze request parameters. Default value is None.
:type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -242,7 +242,7 @@ async def begin_analyze_document(
:type model_id: str
:param analyze_request: Analyze request parameters. Default value is None.
:type analyze_request: JSON
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -294,7 +294,7 @@ async def begin_analyze_document(
:type model_id: str
:param analyze_request: Analyze request parameters. Default value is None.
:type analyze_request: IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -347,7 +347,7 @@ async def begin_analyze_document(
AnalyzeDocumentRequest, JSON, IO[bytes] Default value is None.
:type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest or JSON or
IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -452,7 +452,7 @@ async def _analyze_batch_documents_initial(
output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
**kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -545,7 +545,7 @@ async def begin_analyze_batch_documents(
:type model_id: str
:param analyze_batch_request: Analyze batch request parameters. Default value is None.
:type analyze_batch_request: ~azure.ai.documentintelligence.models.AnalyzeBatchDocumentsRequest
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -598,7 +598,7 @@ async def begin_analyze_batch_documents(
:type model_id: str
:param analyze_batch_request: Analyze batch request parameters. Default value is None.
:type analyze_batch_request: JSON
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -651,7 +651,7 @@ async def begin_analyze_batch_documents(
:type model_id: str
:param analyze_batch_request: Analyze batch request parameters. Default value is None.
:type analyze_batch_request: IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -705,7 +705,7 @@ async def begin_analyze_batch_documents(
AnalyzeBatchDocumentsRequest, JSON, IO[bytes] Default value is None.
:type analyze_batch_request: ~azure.ai.documentintelligence.models.AnalyzeBatchDocumentsRequest
or JSON or IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword locale: Locale hint for text recognition and document analysis. Value may contain
@@ -809,7 +809,7 @@ async def get_analyze_result_pdf(self, model_id: str, result_id: str, **kwargs:
:rtype: AsyncIterator[bytes]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -877,7 +877,7 @@ async def get_analyze_result_figure(
:rtype: AsyncIterator[bytes]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -940,7 +940,7 @@ async def _classify_document_initial(
pages: Optional[str] = None,
**kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1028,7 +1028,7 @@ async def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -1064,7 +1064,7 @@ async def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -1100,7 +1100,7 @@ async def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
@@ -1137,7 +1137,7 @@ async def begin_classify_document(
:keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage".
Default value is None.
:paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
+ :keyword pages: List of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
None.
:paramtype pages: str
:return: An instance of AsyncLROPoller that returns AnalyzeResult. The AnalyzeResult is
@@ -1214,7 +1214,7 @@ class DocumentIntelligenceAdministrationClientOperationsMixin( # pylint: disabl
async def _build_document_model_initial(
self, build_request: Union[_models.BuildDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1404,7 +1404,7 @@ def get_long_running_output(pipeline_response):
async def _compose_model_initial(
self, compose_request: Union[_models.ComposeDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1665,7 +1665,7 @@ async def authorize_model_copy(
:rtype: ~azure.ai.documentintelligence.models.CopyAuthorization
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1728,7 +1728,7 @@ async def authorize_model_copy(
async def _copy_model_to_initial(
self, model_id: str, copy_to_request: Union[_models.CopyAuthorization, JSON, IO[bytes]], **kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1940,7 +1940,7 @@ async def get_model(self, model_id: str, **kwargs: Any) -> _models.DocumentModel
:rtype: ~azure.ai.documentintelligence.models.DocumentModelDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2010,7 +2010,7 @@ def list_models(self, **kwargs: Any) -> AsyncIterable["_models.DocumentModelDeta
cls: ClsType[List[_models.DocumentModelDetails]] = kwargs.pop("cls", None)
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2092,7 +2092,7 @@ async def delete_model( # pylint: disable=inconsistent-return-statements
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2144,7 +2144,7 @@ async def get_resource_info(self, **kwargs: Any) -> _models.ResourceDetails:
:rtype: ~azure.ai.documentintelligence.models.ResourceDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2204,7 +2204,7 @@ async def get_operation(self, operation_id: str, **kwargs: Any) -> _models.Opera
:rtype: ~azure.ai.documentintelligence.models.OperationDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2274,7 +2274,7 @@ def list_operations(self, **kwargs: Any) -> AsyncIterable["_models.OperationDeta
cls: ClsType[List[_models.OperationDetails]] = kwargs.pop("cls", None)
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2347,7 +2347,7 @@ async def get_next(next_link=None):
async def _build_classifier_initial(
self, build_request: Union[_models.BuildDocumentClassifierRequest, JSON, IO[bytes]], **kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2613,7 +2613,7 @@ async def authorize_classifier_copy(
:rtype: ~azure.ai.documentintelligence.models.ClassifierCopyAuthorization
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2679,7 +2679,7 @@ async def _copy_classifier_to_initial(
copy_to_request: Union[_models.ClassifierCopyAuthorization, JSON, IO[bytes]],
**kwargs: Any
) -> AsyncIterator[bytes]:
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2895,7 +2895,7 @@ async def get_classifier(self, classifier_id: str, **kwargs: Any) -> _models.Doc
:rtype: ~azure.ai.documentintelligence.models.DocumentClassifierDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2965,7 +2965,7 @@ def list_classifiers(self, **kwargs: Any) -> AsyncIterable["_models.DocumentClas
cls: ClsType[List[_models.DocumentClassifierDetails]] = kwargs.pop("cls", None)
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -3047,7 +3047,7 @@ async def delete_classifier( # pylint: disable=inconsistent-return-statements
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
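
For reference, the pages keyword these docstrings describe takes comma-separated page numbers and ranges in a single string. A usage sketch against the async client; the endpoint, key, and document URL below are placeholders:

import asyncio

from azure.core.credentials import AzureKeyCredential
from azure.ai.documentintelligence.aio import DocumentIntelligenceClient
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest


async def main() -> None:
    async with DocumentIntelligenceClient(
        endpoint="https://<resource>.cognitiveservices.azure.com",
        credential=AzureKeyCredential("<key>"),
    ) as client:
        poller = await client.begin_analyze_document(
            model_id="prebuilt-layout",
            analyze_request=AnalyzeDocumentRequest(url_source="https://example.com/sample.pdf"),
            pages="1-3,5,7-9",  # analyzes pages 1, 2, 3, 5, 7, 8, and 9 only
        )
        result = await poller.result()
        print(len(result.pages))


asyncio.run(main())
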
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py
index 48f65e6f54cd..f7dd32510333 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py
@@ -6,617 +6,9 @@
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
-import sys
-from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, Mapping, cast, overload
+from typing import List
-from azure.core.pipeline import PipelineResponse
-from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.polling.async_base_polling import AsyncLROBasePolling
-from azure.core.rest import AsyncHttpResponse, HttpRequest
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.core.utils import case_insensitive_dict
-
-from ._operations import (
- DocumentIntelligenceClientOperationsMixin as GeneratedDIClientOps,
- DocumentIntelligenceAdministrationClientOperationsMixin as GeneratedDIAdminClientOps,
-)
-from ... import models as _models
-from ..._model_base import _deserialize
-from ..._operations._patch import PollingReturnType_co, _parse_operation_id, _finished
-
-if sys.version_info >= (3, 9):
- from collections.abc import MutableMapping
-else:
- from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
-JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
-T = TypeVar("T")
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
-
-
-class AsyncAnalyzeDocumentLROPoller(AsyncLROPoller[PollingReturnType_co]):
- @property
- def details(self) -> Mapping[str, Any]:
- """Returns metadata associated with the long-running operation.
-
- :return: Returns metadata associated with the long-running operation.
- :rtype: Mapping[str, Any]
- """
- return {
- "operation_id": _parse_operation_id(
- self.polling_method()._initial_response.http_response.headers["Operation-Location"] # type: ignore # pylint: disable=protected-access
- ),
- }
-
- @classmethod
- def from_continuation_token(
- cls, polling_method: AsyncPollingMethod[PollingReturnType_co], continuation_token: str, **kwargs: Any
- ) -> "AsyncAnalyzeDocumentLROPoller":
- (
- client,
- initial_response,
- deserialization_callback,
- ) = polling_method.from_continuation_token(continuation_token, **kwargs)
-
- return cls(client, initial_response, deserialization_callback, polling_method)
-
-
-class AsyncAnalyzeBatchDocumentsLROPollingMethod(AsyncLROBasePolling): # pylint: disable=name-too-long
- def finished(self) -> bool:
- """Is this polling finished?
-
- :return: Whether the polling finished or not.
- :rtype: bool
- """
- return _finished(self.status())
-
-
-class DocumentIntelligenceAdministrationClientOperationsMixin(
- GeneratedDIAdminClientOps
-): # pylint: disable=name-too-long
- @distributed_trace_async
- async def begin_build_classifier( # type: ignore[override]
- self, build_request: Union[_models.BuildDocumentClassifierRequest, JSON, IO[bytes]], **kwargs: Any
- ) -> AsyncLROPoller[_models.DocumentClassifierDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentClassifierDetails] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._build_classifier_initial( # type: ignore
- build_request=build_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentClassifierDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: AsyncPollingMethod = cast(
- AsyncPollingMethod,
- AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
- )
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AsyncLROPoller[_models.DocumentClassifierDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncLROPoller[_models.DocumentClassifierDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace_async
- async def begin_build_document_model( # type: ignore[override]
- self, build_request: Union[_models.BuildDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
- ) -> AsyncLROPoller[_models.DocumentModelDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._build_document_model_initial( # type: ignore
- build_request=build_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentModelDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: AsyncPollingMethod = cast(
- AsyncPollingMethod,
- AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
- )
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AsyncLROPoller[_models.DocumentModelDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncLROPoller[_models.DocumentModelDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace_async
- async def begin_compose_model( # type: ignore[override]
- self, compose_request: Union[_models.ComposeDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any
- ) -> AsyncLROPoller[_models.DocumentModelDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._compose_model_initial( # type: ignore
- compose_request=compose_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentModelDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: AsyncPollingMethod = cast(
- AsyncPollingMethod,
- AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
- )
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AsyncLROPoller[_models.DocumentModelDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncLROPoller[_models.DocumentModelDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace_async
- async def begin_copy_model_to( # type: ignore[override]
- self, model_id: str, copy_to_request: Union[_models.CopyAuthorization, JSON, IO[bytes]], **kwargs: Any
- ) -> AsyncLROPoller[_models.DocumentModelDetails]:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._copy_model_to_initial( # type: ignore
- model_id=model_id,
- copy_to_request=copy_to_request,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.DocumentModelDetails, response.json())
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: AsyncPollingMethod = cast(
- AsyncPollingMethod,
- AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
- )
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AsyncLROPoller[_models.DocumentModelDetails].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncLROPoller[_models.DocumentModelDetails](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
-
-class DocumentIntelligenceClientOperationsMixin(GeneratedDIClientOps): # pylint: disable=name-too-long
- @overload
- async def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[_models.AnalyzeDocumentRequest] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Default value is None.
- :type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is
- compatible with MutableMapping
- :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[JSON] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Default value is None.
- :type analyze_request: JSON
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is
- compatible with MutableMapping
- :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def begin_analyze_document(
- self,
- model_id: str,
- analyze_request: Optional[IO[bytes]] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Default value is None.
- :type analyze_request: IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is
- compatible with MutableMapping
- :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace_async
- async def begin_analyze_document( # type: ignore[override]
- self,
- model_id: str,
- analyze_request: Optional[Union[_models.AnalyzeDocumentRequest, JSON, IO[bytes]]] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- **kwargs: Any
- ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]:
- """Analyzes document with document model.
-
- :param model_id: Unique document model name. Required.
- :type model_id: str
- :param analyze_request: Analyze request parameters. Is one of the following types:
- AnalyzeDocumentRequest, JSON, IO[bytes] Default value is None.
- :type analyze_request: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest or JSON or
- IO[bytes]
- :keyword pages: Range of 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is
- None.
- :paramtype pages: str
- :keyword locale: Locale hint for text recognition and document analysis. Value may contain
- only
- the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is
- None.
- :paramtype locale: str
- :keyword string_index_type: Method used to compute string offset and length. Known values are:
- "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None.
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType
- :keyword features: List of optional analysis features. Default value is None.
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature]
- :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber".
- Default value is None.
- :paramtype query_fields: list[str]
- :keyword output_content_format: Format of the analyze result top-level content. Known values
- are: "text" and "markdown". Default value is None.
- :paramtype output_content_format: str or ~azure.ai.documentintelligence.models.ContentFormat
- :keyword output: Additional outputs to generate during analysis. Default value is None.
- :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption]
- :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is
- compatible with MutableMapping
- :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None))
- cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._analyze_document_initial(
- model_id=model_id,
- analyze_request=analyze_request,
- pages=pages,
- locale=locale,
- string_index_type=string_index_type,
- features=features,
- query_fields=query_fields,
- output_content_format=output_content_format,
- output=output,
- content_type=content_type,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- await raw_result.http_response.read() # type: ignore
- kwargs.pop("error_map", None)
-
- def get_long_running_output(pipeline_response):
- response_headers = {}
- response = pipeline_response.http_response
- response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- response_headers["Operation-Location"] = self._deserialize(
- "str", response.headers.get("Operation-Location")
- )
-
- deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return deserialized
-
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
- }
-
- if polling is True:
- polling_method: AsyncPollingMethod = cast(
- AsyncPollingMethod,
- AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
- )
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
-            return AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult].from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult](
- self._client, raw_result, get_long_running_output, polling_method # type: ignore
- )
-
- @distributed_trace_async
- async def begin_analyze_batch_documents( # type: ignore[override]
- self,
- model_id: str,
- analyze_batch_request: Optional[Union[_models.AnalyzeBatchDocumentsRequest, JSON, IO[bytes]]] = None,
- *,
- pages: Optional[str] = None,
- locale: Optional[str] = None,
- string_index_type: Optional[Union[str, _models.StringIndexType]] = None,
- features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None,
- query_fields: Optional[List[str]] = None,
- output_content_format: Optional[Union[str, _models.ContentFormat]] = None,
- output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None,
- **kwargs: Any
- ) -> AsyncLROPoller[_models.AnalyzeBatchResult]:
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- return await super().begin_analyze_batch_documents(
- model_id=model_id,
- analyze_batch_request=analyze_batch_request,
- pages=pages,
- locale=locale,
- string_index_type=string_index_type,
- features=features,
- query_fields=query_fields,
- output_content_format=output_content_format,
- output=output,
- polling=AsyncAnalyzeBatchDocumentsLROPollingMethod(timeout=lro_delay),
- **kwargs
- )
-
-
-__all__: List[str] = [
- "DocumentIntelligenceClientOperationsMixin",
- "DocumentIntelligenceAdministrationClientOperationsMixin",
-] # Add all objects you want publicly available to users at this package level
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
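
The AsyncAnalyzeDocumentLROPoller removed here surfaced the analysis operation id through a details property parsed out of the Operation-Location header. A sketch of recovering the same id from a plain AsyncLROPoller, assuming the header keeps the .../analyzeResults/<id>?... shape the deleted _parse_operation_id expected:

from urllib.parse import urlparse


def operation_id_from_poller(poller) -> str:
    # Reaches into the same private initial-response machinery the removed
    # `details` property used; brittle by nature, shown only as a sketch.
    headers = poller.polling_method()._initial_response.http_response.headers
    path = urlparse(headers["Operation-Location"]).path
    return path.rstrip("/").rsplit("/", 1)[-1]
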
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py
index 4ba9c2d81830..f7dd32510333 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py
@@ -6,87 +6,9 @@
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
-from typing import Any, List, Union
+from typing import List
-from azure.core.credentials import AzureKeyCredential
-from azure.core.credentials_async import AsyncTokenCredential
-
-from ._client import (
- DocumentIntelligenceClient as DIClientGenerated,
- DocumentIntelligenceAdministrationClient as DIAClientGenerated,
-)
-from ..aio._operations._patch import AsyncAnalyzeDocumentLROPoller
-
-
-class DocumentIntelligenceClient(DIClientGenerated): # pylint: disable=client-accepts-api-version-keyword
- """DocumentIntelligenceClient.
-
- :param endpoint: The Document Intelligence service endpoint. Required.
- :type endpoint: str
- :param credential: Credential needed for the client to connect to Azure. Is either a
- AzureKeyCredential type or a TokenCredential type. Required.
- :type credential: ~azure.core.credentials.AzureKeyCredential or
- ~azure.core.credentials_async.AsyncTokenCredential
- :keyword api_version: The API version to use for this operation. Default value is
- "2024-07-31-preview". Note that overriding this default value may result in unsupported
- behavior.
- :paramtype api_version: str
- """
-
- def __init__(
- self,
- endpoint: str,
- credential: Union[AzureKeyCredential, AsyncTokenCredential],
- **kwargs: Any,
- ) -> None:
- # Patch the default polling interval to be 1s.
- polling_interval = kwargs.pop("polling_interval", 1)
- super().__init__(
- endpoint=endpoint,
- credential=credential,
- polling_interval=polling_interval,
- **kwargs,
- )
-
-
-class DocumentIntelligenceAdministrationClient(
- DIAClientGenerated
-): # pylint: disable=client-accepts-api-version-keyword
- """DocumentIntelligenceAdministrationClient.
-
- :param endpoint: The Document Intelligence service endpoint. Required.
- :type endpoint: str
- :param credential: Credential needed for the client to connect to Azure. Is either a
- AzureKeyCredential type or a TokenCredential type. Required.
- :type credential: ~azure.core.credentials.AzureKeyCredential or
- ~azure.core.credentials_async.AsyncTokenCredential
- :keyword api_version: The API version to use for this operation. Default value is
- "2024-07-31-preview". Note that overriding this default value may result in unsupported
- behavior.
- :paramtype api_version: str
- """
-
- def __init__(
- self,
- endpoint: str,
- credential: Union[AzureKeyCredential, AsyncTokenCredential],
- **kwargs: Any,
- ) -> None:
- # Patch the default polling interval to be 1s.
- polling_interval = kwargs.pop("polling_interval", 1)
- super().__init__(
- endpoint=endpoint,
- credential=credential,
- polling_interval=polling_interval,
- **kwargs,
- )
-
-
-__all__: List[str] = [
- "DocumentIntelligenceClient",
- "DocumentIntelligenceAdministrationClient",
- "AsyncAnalyzeDocumentLROPoller",
-] # Add all objects you want publicly available to users at this package level
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
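
Both deleted wrappers existed for one line of behavior: they defaulted polling_interval to 1 second instead of azure-core's 30. Callers who depended on the faster cadence can pass the value explicitly; a sketch with placeholder endpoint and key:

from azure.core.credentials import AzureKeyCredential
from azure.ai.documentintelligence.aio import DocumentIntelligenceClient

client = DocumentIntelligenceClient(
    endpoint="https://<resource>.cognitiveservices.azure.com",
    credential=AzureKeyCredential("<key>"),
    polling_interval=1,  # the default the removed subclass patched in
)
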
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/__init__.py
index 260c9effd5ed..1207ac80a628 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/__init__.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/__init__.py
@@ -11,7 +11,7 @@
from ._models import AnalyzeBatchOperationDetail
from ._models import AnalyzeBatchResult
from ._models import AnalyzeBatchResultOperation
-from ._patch import AnalyzeDocumentRequest
+from ._models import AnalyzeDocumentRequest
from ._models import AnalyzeResult
from ._models import AnalyzeResultOperation
from ._models import AuthorizeClassifierCopyRequest
@@ -23,7 +23,7 @@
from ._models import BuildDocumentModelRequest
from ._models import ClassifierCopyAuthorization
from ._models import ClassifierDocumentTypeDetails
-from ._patch import ClassifyDocumentRequest
+from ._models import ClassifyDocumentRequest
from ._models import ComposeDocumentModelRequest
from ._models import CopyAuthorization
from ._models import CurrencyValue
@@ -83,7 +83,8 @@
from ._enums import ParagraphRole
from ._enums import SplitMode
from ._enums import StringIndexType
-
+from ._patch import __all__ as _patch_all
+from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
@@ -164,5 +165,5 @@
"SplitMode",
"StringIndexType",
]
-
+__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_enums.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_enums.py
index 64c43bc8990c..de21b9aa550e 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_enums.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_enums.py
@@ -64,7 +64,7 @@ class DocumentAnalysisFeature(str, Enum, metaclass=CaseInsensitiveEnumMeta):
class DocumentBarcodeKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Barcode kind."""
- Q_R_CODE = "QRCode"
+ QR_CODE = "QRCode"
"""QR code, as defined in ISO/IEC 18004:2015."""
PDF417 = "PDF417"
"""PDF417, as defined in ISO 15438."""
@@ -90,7 +90,7 @@ class DocumentBarcodeKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""GS1 DataBar Expanded barcode."""
ITF = "ITF"
"""Interleaved 2 of 5 barcode, as defined in ANSI/AIM BC2-1995."""
- MICRO_Q_R_CODE = "MicroQRCode"
+ MICRO_QR_CODE = "MicroQRCode"
"""Micro QR code, as defined in ISO/IEC 23941:2022."""
AZTEC = "Aztec"
"""Aztec code, as defined in ISO/IEC 24778:2008."""
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py
index ca357225b5aa..577ab5350c6b 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py
@@ -2027,7 +2027,7 @@ class DocumentLanguage(_model_base.Model):
-    :ivar locale: Detected language. Value may an ISO 639-1 language code (ex. "en", "fr")
+    :ivar locale: Detected language. Value may be an ISO 639-1 language code (ex. "en", "fr")
or BCP 47 language tag (ex. "zh-Hans"). Required.
:vartype locale: str
:ivar spans: Location of the text elements in the concatenated content the language applies
to. Required.
:vartype spans: list[~azure.ai.documentintelligence.models.DocumentSpan]
@@ -2035,7 +2035,7 @@ class DocumentLanguage(_model_base.Model):
:vartype confidence: float
"""
locale: str = rest_field()
-    """Detected language. Value may an ISO 639-1 language code (ex. \"en\", \"fr\")
+    """Detected language. Value may be an ISO 639-1 language code (ex. \"en\", \"fr\")
or BCP 47 language tag (ex. \"zh-Hans\"). Required."""
spans: List["_models.DocumentSpan"] = rest_field()
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py
index c385980aebd5..f7dd32510333 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py
@@ -6,45 +6,9 @@
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
-from typing import List, Optional
-from ._models import (
- AnalyzeDocumentRequest as GeneratedAnalyzeDocumentRequest,
- ClassifyDocumentRequest as GeneratedClassifyDocumentRequest,
-)
-from .._model_base import rest_field
+from typing import List
-
-class AnalyzeDocumentRequest(GeneratedAnalyzeDocumentRequest):
- """Document analysis parameters.
-
- :ivar url_source: Document URL to analyze. Either url_source or bytes_source must be specified.
- :vartype url_source: str
- :ivar bytes_source: Document bytes to analyze. Either url_source or bytes_source must be specified.
- :vartype bytes_source: bytes
- """
-
- bytes_source: Optional[bytes] = rest_field(name="base64Source", format="base64")
- """Document bytes to analyze. Either url_source or bytes_source must be specified."""
-
-
-class ClassifyDocumentRequest(GeneratedClassifyDocumentRequest):
- """Document classification parameters.
-
- :ivar url_source: Document URL to classify. Either url_source or bytes_source must be
- specified.
- :vartype url_source: str
- :ivar bytes_source: Document bytes to classify. Either url_source or bytes_source must be specified.
- :vartype bytes_source: bytes
- """
-
- bytes_source: Optional[bytes] = rest_field(name="base64Source", format="base64")
- """Document bytes to classify. Either url_source or bytes_source must be specified."""
-
-
-__all__: List[str] = [
- "AnalyzeDocumentRequest",
- "ClassifyDocumentRequest",
-] # Add all objects you want publicly available to users at this package level
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
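
The deleted subclasses contributed a bytes_source field that serialized as base64Source, so that capability now has to come from the generated models imported earlier in this diff. Whatever the model surface, the wire shape is stable; a sketch of building the equivalent raw-JSON request body (the local file name is hypothetical):

import base64

with open("sample.pdf", "rb") as f:  # hypothetical local document
    pdf_bytes = f.read()

# The removed bytes_source field produced exactly this body:
analyze_request = {"base64Source": base64.b64encode(pdf_bytes).decode("ascii")}
# The URL-based alternative remains: {"urlSource": "https://example.com/sample.pdf"}
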
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py
new file mode 100644
index 000000000000..a02bf102439b
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import os
+import pytest
+from dotenv import load_dotenv
+from devtools_testutils import (
+ test_proxy,
+ add_general_regex_sanitizer,
+ add_body_key_sanitizer,
+ add_header_regex_sanitizer,
+)
+
+load_dotenv()
+
+
+# avoid recording sensitive identity information in recordings
+@pytest.fixture(scope="session", autouse=True)
+def add_sanitizers(test_proxy):
+ documentintelligence_subscription_id = os.environ.get(
+ "DOCUMENTINTELLIGENCE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ documentintelligence_tenant_id = os.environ.get(
+ "DOCUMENTINTELLIGENCE_TENANT_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ documentintelligence_client_id = os.environ.get(
+ "DOCUMENTINTELLIGENCE_CLIENT_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ documentintelligence_client_secret = os.environ.get(
+ "DOCUMENTINTELLIGENCE_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=documentintelligence_subscription_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(regex=documentintelligence_tenant_id, value="00000000-0000-0000-0000-000000000000")
+ add_general_regex_sanitizer(regex=documentintelligence_client_id, value="00000000-0000-0000-0000-000000000000")
+ add_general_regex_sanitizer(regex=documentintelligence_client_secret, value="00000000-0000-0000-0000-000000000000")
+
+ documentintelligenceadministration_subscription_id = os.environ.get(
+ "DOCUMENTINTELLIGENCEADMINISTRATION_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ documentintelligenceadministration_tenant_id = os.environ.get(
+ "DOCUMENTINTELLIGENCEADMINISTRATION_TENANT_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ documentintelligenceadministration_client_id = os.environ.get(
+ "DOCUMENTINTELLIGENCEADMINISTRATION_CLIENT_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ documentintelligenceadministration_client_secret = os.environ.get(
+ "DOCUMENTINTELLIGENCEADMINISTRATION_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=documentintelligenceadministration_subscription_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=documentintelligenceadministration_tenant_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=documentintelligenceadministration_client_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=documentintelligenceadministration_client_secret, value="00000000-0000-0000-0000-000000000000"
+ )
+
+ add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]")
+ add_header_regex_sanitizer(key="Cookie", value="cookie;")
+ add_body_key_sanitizer(json_path="$..access_token", value="access_token")
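
Each sanitizer above rewrites matches in recorded requests and responses before they are written to disk, so live subscription, tenant, and client identifiers never land in the repository; load_dotenv() lets the real values come from a local .env file. A hedged sketch of one more sanitizer following the same pattern (the DOCUMENTINTELLIGENCE_ENDPOINT variable name is an assumption, not something this file reads today):

    documentintelligence_endpoint = os.environ.get(
        "DOCUMENTINTELLIGENCE_ENDPOINT", "https://fake_documentintelligence_endpoint.com"
    )
    add_general_regex_sanitizer(
        regex=documentintelligence_endpoint, value="https://fake_documentintelligence_endpoint.com"
    )
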
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py
new file mode 100644
index 000000000000..a2d8979b91ef
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from devtools_testutils import recorded_by_proxy
+from testpreparer import DocumentIntelligenceClientTestBase, DocumentIntelligencePreparer
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestDocumentIntelligence(DocumentIntelligenceClientTestBase):
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy
+ def test_begin_analyze_document(self, documentintelligence_endpoint):
+ client = self.create_client(endpoint=documentintelligence_endpoint)
+ response = client.begin_analyze_document(
+ model_id="str",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy
+ def test_begin_analyze_batch_documents(self, documentintelligence_endpoint):
+ client = self.create_client(endpoint=documentintelligence_endpoint)
+ response = client.begin_analyze_batch_documents(
+ model_id="str",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy
+ def test_get_analyze_result_pdf(self, documentintelligence_endpoint):
+ client = self.create_client(endpoint=documentintelligence_endpoint)
+ response = client.get_analyze_result_pdf(
+ model_id="str",
+ result_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy
+ def test_get_analyze_result_figure(self, documentintelligence_endpoint):
+ client = self.create_client(endpoint=documentintelligence_endpoint)
+ response = client.get_analyze_result_figure(
+ model_id="str",
+ result_id="str",
+ figure_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy
+ def test_begin_classify_document(self, documentintelligence_endpoint):
+ client = self.create_client(endpoint=documentintelligence_endpoint)
+ response = client.begin_classify_document(
+ classifier_id="str",
+ classify_request={"base64Source": bytes("bytes", encoding="utf-8"), "urlSource": "str"},
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
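
The generated tests above are skipped placeholders: every argument is the literal "str". As a rough illustration, a filled-in version of the first test might look like the sketch below; the "prebuilt-layout" model id, the analyze_request parameter shape, and the asserted result fields are assumptions about the service surface, not something this diff pins down:

    @DocumentIntelligencePreparer()
    @recorded_by_proxy
    def test_begin_analyze_document_layout(self, documentintelligence_endpoint):
        client = self.create_client(endpoint=documentintelligence_endpoint)
        poller = client.begin_analyze_document(
            model_id="prebuilt-layout",
            analyze_request={"urlSource": "https://example.com/sample.pdf"},
        )
        result = poller.result()  # poll until the service returns the final result
        assert result.model_id == "prebuilt-layout"
        assert result.pages  # layout analysis should return at least one page
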
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py
new file mode 100644
index 000000000000..a149bfd573ce
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py
@@ -0,0 +1,245 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from devtools_testutils import recorded_by_proxy
+from testpreparer import DocumentIntelligenceAdministrationClientTestBase, DocumentIntelligenceAdministrationPreparer
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestDocumentIntelligenceAdministration(DocumentIntelligenceAdministrationClientTestBase):
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_begin_build_document_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.begin_build_document_model(
+ build_request={
+ "buildMode": "str",
+ "modelId": "str",
+                "allowOverwrite": True,
+ "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"},
+ "azureBlobSource": {"containerUrl": "str", "prefix": "str"},
+ "description": "str",
+ "maxTrainingHours": 0.0,
+ "tags": {"str": "str"},
+ },
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_begin_compose_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.begin_compose_model(
+ compose_request={
+ "classifierId": "str",
+ "docTypes": {
+ "str": {
+ "buildMode": "str",
+ "confidenceThreshold": 0.0,
+ "description": "str",
+ "features": ["str"],
+ "fieldConfidence": {"str": 0.0},
+ "fieldSchema": {
+ "str": {
+ "type": "str",
+ "description": "str",
+ "example": "str",
+ "items": ...,
+ "properties": {"str": ...},
+ }
+ },
+ "maxDocumentsToAnalyze": 0,
+ "modelId": "str",
+ "queryFields": ["str"],
+ }
+ },
+ "modelId": "str",
+ "description": "str",
+ "split": "str",
+ "tags": {"str": "str"},
+ },
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_authorize_model_copy(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.authorize_model_copy(
+ authorize_copy_request={"modelId": "str", "description": "str", "tags": {"str": "str"}},
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_begin_copy_model_to(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.begin_copy_model_to(
+ model_id="str",
+ copy_to_request={
+ "accessToken": "str",
+ "expirationDateTime": "2020-02-20 00:00:00",
+ "targetModelId": "str",
+ "targetModelLocation": "str",
+ "targetResourceId": "str",
+ "targetResourceRegion": "str",
+ },
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_get_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.get_model(
+ model_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_list_models(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.list_models()
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_delete_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.delete_model(
+ model_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_get_resource_info(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.get_resource_info()
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_get_operation(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.get_operation(
+ operation_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_list_operations(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.list_operations()
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_begin_build_classifier(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.begin_build_classifier(
+ build_request={
+ "classifierId": "str",
+ "docTypes": {
+ "str": {
+ "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"},
+ "azureBlobSource": {"containerUrl": "str", "prefix": "str"},
+ "sourceKind": "str",
+ }
+ },
+                "allowOverwrite": True,
+ "baseClassifierId": "str",
+ "description": "str",
+ },
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_authorize_classifier_copy(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.authorize_classifier_copy(
+ authorize_copy_request={"classifierId": "str", "description": "str", "tags": {"str": "str"}},
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_begin_copy_classifier_to(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.begin_copy_classifier_to(
+ classifier_id="str",
+ copy_to_request={
+ "accessToken": "str",
+ "expirationDateTime": "2020-02-20 00:00:00",
+ "targetClassifierId": "str",
+ "targetClassifierLocation": "str",
+ "targetResourceId": "str",
+ "targetResourceRegion": "str",
+ },
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_get_classifier(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.get_classifier(
+ classifier_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_list_classifiers(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.list_classifiers()
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy
+ def test_delete_classifier(self, documentintelligenceadministration_endpoint):
+ client = self.create_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.delete_classifier(
+ classifier_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
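
For orientation, the placeholder dictionaries above mirror the REST payloads one field at a time. A sketch of what a realistic begin_build_document_model call could look like, keeping the same field names (the container URL and model id are stand-ins):

    poller = client.begin_build_document_model(
        build_request={
            "modelId": "my-custom-model",
            "buildMode": "template",
            "azureBlobSource": {
                "containerUrl": "https://<account>.blob.core.windows.net/<container>?<sas-token>"
            },
            "description": "sample custom extraction model",
        },
    )
    document_model = poller.result()  # polls until the build operation completes
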
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py
new file mode 100644
index 000000000000..7b1980282a18
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py
@@ -0,0 +1,256 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from devtools_testutils.aio import recorded_by_proxy_async
+from testpreparer import DocumentIntelligenceAdministrationPreparer
+from testpreparer_async import DocumentIntelligenceAdministrationClientTestBaseAsync
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestDocumentIntelligenceAdministrationAsync(DocumentIntelligenceAdministrationClientTestBaseAsync):
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_begin_build_document_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await (
+ await client.begin_build_document_model(
+ build_request={
+ "buildMode": "str",
+ "modelId": "str",
+                    "allowOverwrite": True,
+ "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"},
+ "azureBlobSource": {"containerUrl": "str", "prefix": "str"},
+ "description": "str",
+ "maxTrainingHours": 0.0,
+ "tags": {"str": "str"},
+ },
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_begin_compose_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await (
+ await client.begin_compose_model(
+ compose_request={
+ "classifierId": "str",
+ "docTypes": {
+ "str": {
+ "buildMode": "str",
+ "confidenceThreshold": 0.0,
+ "description": "str",
+ "features": ["str"],
+ "fieldConfidence": {"str": 0.0},
+ "fieldSchema": {
+ "str": {
+ "type": "str",
+ "description": "str",
+ "example": "str",
+ "items": ...,
+ "properties": {"str": ...},
+ }
+ },
+ "maxDocumentsToAnalyze": 0,
+ "modelId": "str",
+ "queryFields": ["str"],
+ }
+ },
+ "modelId": "str",
+ "description": "str",
+ "split": "str",
+ "tags": {"str": "str"},
+ },
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_authorize_model_copy(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.authorize_model_copy(
+ authorize_copy_request={"modelId": "str", "description": "str", "tags": {"str": "str"}},
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_begin_copy_model_to(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await (
+ await client.begin_copy_model_to(
+ model_id="str",
+ copy_to_request={
+ "accessToken": "str",
+ "expirationDateTime": "2020-02-20 00:00:00",
+ "targetModelId": "str",
+ "targetModelLocation": "str",
+ "targetResourceId": "str",
+ "targetResourceRegion": "str",
+ },
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_get_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.get_model(
+ model_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_list_models(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.list_models()
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_delete_model(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.delete_model(
+ model_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_get_resource_info(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.get_resource_info()
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_get_operation(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.get_operation(
+ operation_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_list_operations(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.list_operations()
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_begin_build_classifier(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await (
+ await client.begin_build_classifier(
+ build_request={
+ "classifierId": "str",
+ "docTypes": {
+ "str": {
+ "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"},
+ "azureBlobSource": {"containerUrl": "str", "prefix": "str"},
+ "sourceKind": "str",
+ }
+ },
+                    "allowOverwrite": True,
+ "baseClassifierId": "str",
+ "description": "str",
+ },
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_authorize_classifier_copy(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.authorize_classifier_copy(
+ authorize_copy_request={"classifierId": "str", "description": "str", "tags": {"str": "str"}},
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_begin_copy_classifier_to(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await (
+ await client.begin_copy_classifier_to(
+ classifier_id="str",
+ copy_to_request={
+ "accessToken": "str",
+ "expirationDateTime": "2020-02-20 00:00:00",
+ "targetClassifierId": "str",
+ "targetClassifierLocation": "str",
+ "targetResourceId": "str",
+ "targetResourceRegion": "str",
+ },
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_get_classifier(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.get_classifier(
+ classifier_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_list_classifiers(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = client.list_classifiers()
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligenceAdministrationPreparer()
+ @recorded_by_proxy_async
+ async def test_delete_classifier(self, documentintelligenceadministration_endpoint):
+ client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint)
+ response = await client.delete_classifier(
+ classifier_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
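
Note the double await in the long-running operations above: the first await sends the initial request and returns an AsyncLROPoller, and awaiting its .result() then polls until the operation reaches a terminal state. The nested form the generator emits is equivalent to this flatter sketch (build_request stands in for any of the payloads above):

    poller = await client.begin_build_document_model(build_request=build_request)
    document_model = await poller.result()  # poll until the service returns the final result
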
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py
new file mode 100644
index 000000000000..e7fd894224fb
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py
@@ -0,0 +1,79 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from devtools_testutils.aio import recorded_by_proxy_async
+from testpreparer import DocumentIntelligencePreparer
+from testpreparer_async import DocumentIntelligenceClientTestBaseAsync
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestDocumentIntelligenceAsync(DocumentIntelligenceClientTestBaseAsync):
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy_async
+ async def test_begin_analyze_document(self, documentintelligence_endpoint):
+ client = self.create_async_client(endpoint=documentintelligence_endpoint)
+ response = await (
+ await client.begin_analyze_document(
+ model_id="str",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy_async
+ async def test_begin_analyze_batch_documents(self, documentintelligence_endpoint):
+ client = self.create_async_client(endpoint=documentintelligence_endpoint)
+ response = await (
+ await client.begin_analyze_batch_documents(
+ model_id="str",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy_async
+ async def test_get_analyze_result_pdf(self, documentintelligence_endpoint):
+ client = self.create_async_client(endpoint=documentintelligence_endpoint)
+ response = await client.get_analyze_result_pdf(
+ model_id="str",
+ result_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy_async
+ async def test_get_analyze_result_figure(self, documentintelligence_endpoint):
+ client = self.create_async_client(endpoint=documentintelligence_endpoint)
+ response = await client.get_analyze_result_figure(
+ model_id="str",
+ result_id="str",
+ figure_id="str",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @DocumentIntelligencePreparer()
+ @recorded_by_proxy_async
+ async def test_begin_classify_document(self, documentintelligence_endpoint):
+ client = self.create_async_client(endpoint=documentintelligence_endpoint)
+ response = await (
+ await client.begin_classify_document(
+ classifier_id="str",
+ classify_request={"base64Source": bytes("bytes", encoding="utf-8"), "urlSource": "str"},
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
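
As with the sync variants, these are skipped scaffolds. One detail worth noting for anyone fleshing them out: async clients should be closed when a test owns their lifetime. A sketch using the async context manager, reusing the keyword names from the generated classify test above (the classifier id and source URL are stand-ins):

    async with self.create_async_client(endpoint=documentintelligence_endpoint) as client:
        poller = await client.begin_classify_document(
            classifier_id="my-classifier",
            classify_request={"urlSource": "https://example.com/sample.pdf"},
        )
        result = await poller.result()
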
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py
new file mode 100644
index 000000000000..ed3b18488bf5
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py
@@ -0,0 +1,46 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient, DocumentIntelligenceClient
+from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer
+import functools
+
+
+class DocumentIntelligenceClientTestBase(AzureRecordedTestCase):
+
+ def create_client(self, endpoint):
+ credential = self.get_credential(DocumentIntelligenceClient)
+ return self.create_client_from_credential(
+ DocumentIntelligenceClient,
+ credential=credential,
+ endpoint=endpoint,
+ )
+
+
+DocumentIntelligencePreparer = functools.partial(
+ PowerShellPreparer,
+ "documentintelligence",
+ documentintelligence_endpoint="https://fake_documentintelligence_endpoint.com",
+)
+
+
+class DocumentIntelligenceAdministrationClientTestBase(AzureRecordedTestCase):
+
+ def create_client(self, endpoint):
+ credential = self.get_credential(DocumentIntelligenceAdministrationClient)
+ return self.create_client_from_credential(
+ DocumentIntelligenceAdministrationClient,
+ credential=credential,
+ endpoint=endpoint,
+ )
+
+
+DocumentIntelligenceAdministrationPreparer = functools.partial(
+ PowerShellPreparer,
+ "documentintelligenceadministration",
+ documentintelligenceadministration_endpoint="https://fake_documentintelligenceadministration_endpoint.com",
+)
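
PowerShellPreparer resolves each keyword from a correspondingly named environment variable (here DOCUMENTINTELLIGENCE_ENDPOINT) in live runs, substitutes the fake value during playback, and injects the resolved value into each test as a keyword argument. A hedged sketch of a preparer that also resolves an API key (the DOCUMENTINTELLIGENCE_API_KEY variable is an assumption, not used by these tests):

    DocumentIntelligenceKeyPreparer = functools.partial(
        PowerShellPreparer,
        "documentintelligence",
        documentintelligence_endpoint="https://fake_documentintelligence_endpoint.com",
        documentintelligence_api_key="00000000000000000000000000000000",
    )
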
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py
new file mode 100644
index 000000000000..fece6c220e81
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from azure.ai.documentintelligence.aio import DocumentIntelligenceAdministrationClient, DocumentIntelligenceClient
+from devtools_testutils import AzureRecordedTestCase
+
+
+class DocumentIntelligenceClientTestBaseAsync(AzureRecordedTestCase):
+
+ def create_async_client(self, endpoint):
+ credential = self.get_credential(DocumentIntelligenceClient, is_async=True)
+ return self.create_client_from_credential(
+ DocumentIntelligenceClient,
+ credential=credential,
+ endpoint=endpoint,
+ )
+
+
+class DocumentIntelligenceAdministrationClientTestBaseAsync(AzureRecordedTestCase):
+
+ def create_async_client(self, endpoint):
+ credential = self.get_credential(DocumentIntelligenceAdministrationClient, is_async=True)
+ return self.create_client_from_credential(
+ DocumentIntelligenceAdministrationClient,
+ credential=credential,
+ endpoint=endpoint,
+ )
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/sdk_packaging.toml b/sdk/documentintelligence/azure-ai-documentintelligence/sdk_packaging.toml
new file mode 100644
index 000000000000..e7687fdae93b
--- /dev/null
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/sdk_packaging.toml
@@ -0,0 +1,2 @@
+[packaging]
+auto_update = false
\ No newline at end of file
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml b/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml
index 18b1c4b0e7f4..c47675f59934 100644
--- a/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml
+++ b/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml
@@ -1,4 +1,4 @@
directory: specification/ai/DocumentIntelligence
-commit: ec2a81edaecf3970e5938936e8256759905163e6
-additionalDirectories: []
-repo: Azure/azure-rest-api-specs
+commit: e6ae33c2dc2c450ddd1147342b048a4ccd49323e
+repo: test-repo-billy/azure-rest-api-specs
+additionalDirectories: