From 49b7a37166237690efa938e1f1247eb3875f6448 Mon Sep 17 00:00:00 2001 From: azure-sdk Date: Wed, 13 Aug 2025 00:19:42 +0000 Subject: [PATCH] Configurations: 'specification/ai/DocumentIntelligence/tspconfig.yaml', API Version: 2025-09-01, SDK Release Type: beta, and CommitSHA: 'c03820dacaeb1a058d4914fce296e31b160111dc' in SpecRepo: 'https://github.com/Azure/azure-rest-api-specs' Pipeline run: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=5212129 Refer to https://eng.ms/docs/products/azure-developer-experience/develop/sdk-release/sdk-release-prerequisites to prepare for SDK release. --- .../azure-ai-documentintelligence/MANIFEST.in | 2 +- .../_metadata.json | 7 + .../apiview-properties.json | 132 +++ .../azure/ai/documentintelligence/_client.py | 24 +- .../ai/documentintelligence/_configuration.py | 17 +- .../_operations/__init__.py | 10 +- .../_operations/_operations.py | 308 +++--- .../_operations/_patch.py | 726 +------------ .../azure/ai/documentintelligence/_patch.py | 90 +- .../documentintelligence/_utils/__init__.py | 6 + .../{_model_base.py => _utils/model_base.py} | 92 +- .../serialization.py} | 178 +--- .../ai/documentintelligence/_utils/utils.py | 25 + .../ai/documentintelligence/_validation.py | 20 +- .../azure/ai/documentintelligence/_vendor.py | 37 - .../azure/ai/documentintelligence/_version.py | 2 +- .../ai/documentintelligence/aio/_client.py | 24 +- .../aio/_configuration.py | 17 +- .../aio/_operations/__init__.py | 10 +- .../aio/_operations/_operations.py | 187 +++- .../aio/_operations/_patch.py | 708 +------------ .../ai/documentintelligence/aio/_patch.py | 89 +- .../ai/documentintelligence/aio/_vendor.py | 37 - .../ai/documentintelligence/models/_models.py | 986 +++++++++++------- .../ai/documentintelligence/models/_patch.py | 9 +- .../delete_analyze_batch_documents_result.py | 33 + .../delete_analyze_document_result.py | 33 + .../delete_document_classifier.py | 32 + .../delete_document_model.py | 32 + 
.../get_analyze_batch_documents_result.py | 34 + .../get_analyze_batch_documents_results.py | 34 + .../get_analyze_document_result_figure.py | 35 + .../get_analyze_document_result_fpdf.py | 34 + .../get_analyze_document_result_png.py | 34 + .../get_document_classifier.py | 33 + .../get_document_classifiers.py | 32 + .../get_document_model_custom.py | 33 + .../get_document_model_prebuilt.py | 33 + .../generated_samples/get_document_models.py | 32 + .../generated_samples/get_operation.py | 33 + .../generated_samples/get_operations.py | 32 + .../generated_samples/get_resource_details.py | 31 + .../generated_tests/conftest.py | 70 ++ .../test_document_intelligence.py | 139 +++ ...st_document_intelligence_administration.py | 245 +++++ ...ument_intelligence_administration_async.py | 256 +++++ .../test_document_intelligence_async.py | 146 +++ .../generated_tests/testpreparer.py | 46 + .../generated_tests/testpreparer_async.py | 31 + .../sample_analyze_addon_highres_async.py | 1 + .../sample_analyze_addon_languages_async.py | 1 + ...sample_analyze_addon_query_fields_async.py | 1 + .../sample_analyze_custom_documents_async.py | 1 + ...lyze_documents_output_in_markdown_async.py | 1 + .../sample_analyze_general_documents_async.py | 1 + ...sample_analyze_identity_documents_async.py | 1 + .../sample_analyze_invoices_async.py | 1 + ...nalyze_invoices_from_bytes_source_async.py | 1 + .../sample_analyze_layout_async.py | 1 + .../sample_analyze_read_async.py | 1 + .../sample_analyze_receipts_async.py | 1 + .../sample_analyze_receipts_from_url_async.py | 1 + .../sample_analyze_tax_us_w2_async.py | 1 + .../sample_classify_document_async.py | 1 + .../sample_compose_model_async.py | 1 + .../sample_copy_model_to_async.py | 1 + .../sample_get_raw_response_async.py | 1 + .../sample_manage_classifiers_async.py | 1 + .../sample_manage_models_async.py | 1 + .../sample_send_request_async.py | 1 + .../samples/sample_analyze_addon_highres.py | 1 + .../samples/sample_analyze_addon_languages.py | 
1 + .../sample_analyze_addon_query_fields.py | 1 + .../sample_analyze_custom_documents.py | 1 + ...le_analyze_documents_output_in_markdown.py | 1 + .../sample_analyze_general_documents.py | 1 + .../sample_analyze_identity_documents.py | 1 + .../samples/sample_analyze_invoices.py | 1 + ...mple_analyze_invoices_from_bytes_source.py | 1 + .../samples/sample_analyze_layout.py | 1 + .../samples/sample_analyze_receipts.py | 1 + .../sample_analyze_receipts_from_url.py | 1 + .../samples/sample_analyze_tax_us_w2.py | 1 + .../samples/sample_classify_document.py | 1 + .../samples/sample_compose_model.py | 1 + .../sample_convert_to_and_from_dict.py | 2 +- .../samples/sample_copy_model_to.py | 1 + .../samples/sample_get_raw_response.py | 1 + .../samples/sample_manage_classifiers.py | 1 + .../samples/sample_manage_models.py | 1 + .../samples/sample_send_request.py | 1 + .../azure-ai-documentintelligence/setup.py | 15 +- .../tests/preparers.py | 1 + .../tests/test_dac_analyze_batch_documents.py | 1 + .../test_dac_analyze_batch_documents_async.py | 1 + .../tsp-location.yaml | 4 +- 96 files changed, 2857 insertions(+), 2444 deletions(-) create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/_metadata.json create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/apiview-properties.json create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/__init__.py rename sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/{_model_base.py => _utils/model_base.py} (93%) rename sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/{_serialization.py => _utils/serialization.py} (94%) create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/utils.py delete mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_vendor.py delete mode 100644 
sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_vendor.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_batch_documents_result.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_document_result.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_classifier.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_model.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_result.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_results.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_figure.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_fpdf.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_png.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifier.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifiers.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_custom.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_prebuilt.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_models.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operation.py create mode 100644 
sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operations.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_resource_details.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py create mode 100644 sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/MANIFEST.in b/sdk/documentintelligence/azure-ai-documentintelligence/MANIFEST.in index 01a92a6714ff..4f69e6426fc7 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/MANIFEST.in +++ b/sdk/documentintelligence/azure-ai-documentintelligence/MANIFEST.in @@ -4,4 +4,4 @@ include azure/ai/documentintelligence/py.typed recursive-include tests *.py recursive-include samples *.py *.md include azure/__init__.py -include azure/ai/__init__.py \ No newline at end of file +include azure/ai/__init__.py diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/_metadata.json b/sdk/documentintelligence/azure-ai-documentintelligence/_metadata.json new file mode 100644 index 000000000000..74532b93a8fa --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/_metadata.json @@ -0,0 +1,7 @@ +{ + "apiVersion": "2025-09-01", + "commit": 
"c03820dacaeb1a058d4914fce296e31b160111dc", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/ai/DocumentIntelligence", + "emitterVersion": "0.48.1" +} \ No newline at end of file diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/apiview-properties.json b/sdk/documentintelligence/azure-ai-documentintelligence/apiview-properties.json new file mode 100644 index 000000000000..1a9ec59d5e40 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/apiview-properties.json @@ -0,0 +1,132 @@ +{ + "CrossLanguagePackageId": "DocumentIntelligence", + "CrossLanguageDefinitionId": { + "azure.ai.documentintelligence.models.AddressValue": "DocumentIntelligence.AddressValue", + "azure.ai.documentintelligence.models.AnalyzeBatchDocumentsRequest": "DocumentIntelligence.AnalyzeBatchDocumentsRequest", + "azure.ai.documentintelligence.models.AnalyzeBatchOperation": "DocumentIntelligence.AnalyzeBatchOperation", + "azure.ai.documentintelligence.models.AnalyzeBatchOperationDetail": "DocumentIntelligence.AnalyzeBatchOperationDetail", + "azure.ai.documentintelligence.models.AnalyzeBatchResult": "DocumentIntelligence.AnalyzeBatchResult", + "azure.ai.documentintelligence.models.AnalyzedDocument": "DocumentIntelligence.AnalyzedDocument", + "azure.ai.documentintelligence.models.AnalyzeDocumentRequest": "DocumentIntelligence.AnalyzeDocumentRequest", + "azure.ai.documentintelligence.models.AnalyzeResult": "DocumentIntelligence.AnalyzeResult", + "azure.ai.documentintelligence.models.AuthorizeClassifierCopyRequest": "DocumentIntelligence.AuthorizeClassifierCopyRequest", + "azure.ai.documentintelligence.models.AuthorizeCopyRequest": "DocumentIntelligence.AuthorizeCopyRequest", + "azure.ai.documentintelligence.models.AzureBlobContentSource": "DocumentIntelligence.AzureBlobContentSource", + "azure.ai.documentintelligence.models.AzureBlobFileListContentSource": "DocumentIntelligence.AzureBlobFileListContentSource", + 
"azure.ai.documentintelligence.models.BoundingRegion": "DocumentIntelligence.BoundingRegion", + "azure.ai.documentintelligence.models.BuildDocumentClassifierRequest": "DocumentIntelligence.BuildDocumentClassifierRequest", + "azure.ai.documentintelligence.models.BuildDocumentModelRequest": "DocumentIntelligence.BuildDocumentModelRequest", + "azure.ai.documentintelligence.models.ClassifierCopyAuthorization": "DocumentIntelligence.ClassifierCopyAuthorization", + "azure.ai.documentintelligence.models.ClassifierDocumentTypeDetails": "DocumentIntelligence.ClassifierDocumentTypeDetails", + "azure.ai.documentintelligence.models.ClassifyDocumentRequest": "DocumentIntelligence.ClassifyDocumentRequest", + "azure.ai.documentintelligence.models.ComposeDocumentModelRequest": "DocumentIntelligence.ComposeDocumentModelRequest", + "azure.ai.documentintelligence.models.CurrencyValue": "DocumentIntelligence.CurrencyValue", + "azure.ai.documentintelligence.models.CustomDocumentModelsDetails": "DocumentIntelligence.CustomDocumentModelsDetails", + "azure.ai.documentintelligence.models.DocumentBarcode": "DocumentIntelligence.DocumentBarcode", + "azure.ai.documentintelligence.models.DocumentCaption": "DocumentIntelligence.DocumentCaption", + "azure.ai.documentintelligence.models.DocumentIntelligenceOperationDetails": "DocumentIntelligence.DocumentIntelligenceOperationDetails", + "azure.ai.documentintelligence.models.DocumentClassifierBuildOperationDetails": "DocumentIntelligence.DocumentClassifierBuildOperationDetails", + "azure.ai.documentintelligence.models.DocumentClassifierCopyToOperationDetails": "DocumentIntelligence.DocumentClassifierCopyToOperationDetails", + "azure.ai.documentintelligence.models.DocumentClassifierDetails": "DocumentIntelligence.DocumentClassifierDetails", + "azure.ai.documentintelligence.models.DocumentField": "DocumentIntelligence.DocumentField", + "azure.ai.documentintelligence.models.DocumentFieldSchema": "DocumentIntelligence.DocumentFieldSchema", + 
"azure.ai.documentintelligence.models.DocumentFigure": "DocumentIntelligence.DocumentFigure", + "azure.ai.documentintelligence.models.DocumentFootnote": "DocumentIntelligence.DocumentFootnote", + "azure.ai.documentintelligence.models.DocumentFormula": "DocumentIntelligence.DocumentFormula", + "azure.ai.documentintelligence.models.DocumentIntelligenceError": "DocumentIntelligence.DocumentIntelligenceError", + "azure.ai.documentintelligence.models.DocumentIntelligenceErrorResponse": "DocumentIntelligence.DocumentIntelligenceErrorResponse", + "azure.ai.documentintelligence.models.DocumentIntelligenceInnerError": "DocumentIntelligence.DocumentIntelligenceInnerError", + "azure.ai.documentintelligence.models.DocumentIntelligenceResourceDetails": "DocumentIntelligence.DocumentIntelligenceResourceDetails", + "azure.ai.documentintelligence.models.DocumentIntelligenceWarning": "DocumentIntelligence.DocumentIntelligenceWarning", + "azure.ai.documentintelligence.models.DocumentKeyValueElement": "DocumentIntelligence.DocumentKeyValueElement", + "azure.ai.documentintelligence.models.DocumentKeyValuePair": "DocumentIntelligence.DocumentKeyValuePair", + "azure.ai.documentintelligence.models.DocumentLanguage": "DocumentIntelligence.DocumentLanguage", + "azure.ai.documentintelligence.models.DocumentLine": "DocumentIntelligence.DocumentLine", + "azure.ai.documentintelligence.models.DocumentModelBuildOperationDetails": "DocumentIntelligence.DocumentModelBuildOperationDetails", + "azure.ai.documentintelligence.models.DocumentModelComposeOperationDetails": "DocumentIntelligence.DocumentModelComposeOperationDetails", + "azure.ai.documentintelligence.models.DocumentModelCopyToOperationDetails": "DocumentIntelligence.DocumentModelCopyToOperationDetails", + "azure.ai.documentintelligence.models.DocumentModelDetails": "DocumentIntelligence.DocumentModelDetails", + "azure.ai.documentintelligence.models.DocumentPage": "DocumentIntelligence.DocumentPage", + 
"azure.ai.documentintelligence.models.DocumentParagraph": "DocumentIntelligence.DocumentParagraph", + "azure.ai.documentintelligence.models.DocumentSection": "DocumentIntelligence.DocumentSection", + "azure.ai.documentintelligence.models.DocumentSelectionMark": "DocumentIntelligence.DocumentSelectionMark", + "azure.ai.documentintelligence.models.DocumentSpan": "DocumentIntelligence.DocumentSpan", + "azure.ai.documentintelligence.models.DocumentStyle": "DocumentIntelligence.DocumentStyle", + "azure.ai.documentintelligence.models.DocumentTable": "DocumentIntelligence.DocumentTable", + "azure.ai.documentintelligence.models.DocumentTableCell": "DocumentIntelligence.DocumentTableCell", + "azure.ai.documentintelligence.models.DocumentTypeDetails": "DocumentIntelligence.DocumentTypeDetails", + "azure.ai.documentintelligence.models.DocumentWord": "DocumentIntelligence.DocumentWord", + "azure.ai.documentintelligence.models.ModelCopyAuthorization": "DocumentIntelligence.ModelCopyAuthorization", + "azure.ai.documentintelligence.models.StringIndexType": "DocumentIntelligence.StringIndexType", + "azure.ai.documentintelligence.models.DocumentContentFormat": "DocumentIntelligence.DocumentContentFormat", + "azure.ai.documentintelligence.models.LengthUnit": "DocumentIntelligence.LengthUnit", + "azure.ai.documentintelligence.models.DocumentSelectionMarkState": "DocumentIntelligence.DocumentSelectionMarkState", + "azure.ai.documentintelligence.models.DocumentBarcodeKind": "DocumentIntelligence.DocumentBarcodeKind", + "azure.ai.documentintelligence.models.DocumentFormulaKind": "DocumentIntelligence.DocumentFormulaKind", + "azure.ai.documentintelligence.models.ParagraphRole": "DocumentIntelligence.ParagraphRole", + "azure.ai.documentintelligence.models.DocumentTableCellKind": "DocumentIntelligence.DocumentTableCellKind", + "azure.ai.documentintelligence.models.DocumentFontStyle": "DocumentIntelligence.DocumentFontStyle", + "azure.ai.documentintelligence.models.DocumentFontWeight": 
"DocumentIntelligence.DocumentFontWeight", + "azure.ai.documentintelligence.models.DocumentFieldType": "DocumentIntelligence.DocumentFieldType", + "azure.ai.documentintelligence.models.DocumentSignatureType": "DocumentIntelligence.DocumentSignatureType", + "azure.ai.documentintelligence.models.DocumentAnalysisFeature": "DocumentIntelligence.DocumentAnalysisFeature", + "azure.ai.documentintelligence.models.AnalyzeOutputOption": "DocumentIntelligence.AnalyzeOutputOption", + "azure.ai.documentintelligence.models.DocumentIntelligenceOperationStatus": "DocumentIntelligence.DocumentIntelligenceOperationStatus", + "azure.ai.documentintelligence.models.SplitMode": "DocumentIntelligence.SplitMode", + "azure.ai.documentintelligence.models.DocumentBuildMode": "DocumentIntelligence.DocumentBuildMode", + "azure.ai.documentintelligence.models.OperationKind": "DocumentIntelligence.OperationKind", + "azure.ai.documentintelligence.models.ContentSourceKind": "DocumentIntelligence.ContentSourceKind", + "azure.ai.documentintelligence.DocumentIntelligenceClient.begin_analyze_document": "ClientCustomizations.DocumentIntelligenceClient.analyzeDocument", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.begin_analyze_document": "ClientCustomizations.DocumentIntelligenceClient.analyzeDocument", + "azure.ai.documentintelligence.DocumentIntelligenceClient.get_analyze_result_pdf": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeResultPdf", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.get_analyze_result_pdf": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeResultPdf", + "azure.ai.documentintelligence.DocumentIntelligenceClient.get_analyze_result_png": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeResultPng", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.get_analyze_result_png": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeResultPng", + 
"azure.ai.documentintelligence.DocumentIntelligenceClient.get_analyze_result_figure": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeResultFigure", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.get_analyze_result_figure": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeResultFigure", + "azure.ai.documentintelligence.DocumentIntelligenceClient.delete_analyze_result": "ClientCustomizations.DocumentIntelligenceClient.deleteAnalyzeResult", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.delete_analyze_result": "ClientCustomizations.DocumentIntelligenceClient.deleteAnalyzeResult", + "azure.ai.documentintelligence.DocumentIntelligenceClient.begin_analyze_batch_documents": "ClientCustomizations.DocumentIntelligenceClient.analyzeBatchDocuments", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.begin_analyze_batch_documents": "ClientCustomizations.DocumentIntelligenceClient.analyzeBatchDocuments", + "azure.ai.documentintelligence.DocumentIntelligenceClient.list_analyze_batch_results": "ClientCustomizations.DocumentIntelligenceClient.listAnalyzeBatchResults", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.list_analyze_batch_results": "ClientCustomizations.DocumentIntelligenceClient.listAnalyzeBatchResults", + "azure.ai.documentintelligence.DocumentIntelligenceClient.delete_analyze_batch_result": "ClientCustomizations.DocumentIntelligenceClient.deleteAnalyzeBatchResult", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.delete_analyze_batch_result": "ClientCustomizations.DocumentIntelligenceClient.deleteAnalyzeBatchResult", + "azure.ai.documentintelligence.DocumentIntelligenceClient.get_analyze_batch_result": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeBatchResult", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.get_analyze_batch_result": "ClientCustomizations.DocumentIntelligenceClient.getAnalyzeBatchResult", + 
"azure.ai.documentintelligence.DocumentIntelligenceClient.begin_classify_document": "ClientCustomizations.DocumentIntelligenceClient.classifyDocument", + "azure.ai.documentintelligence.aio.DocumentIntelligenceClient.begin_classify_document": "ClientCustomizations.DocumentIntelligenceClient.classifyDocument", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.begin_build_document_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.buildDocumentModel", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.begin_build_document_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.buildDocumentModel", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.begin_compose_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.composeModel", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.begin_compose_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.composeModel", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.authorize_model_copy": "ClientCustomizations.DocumentIntelligenceAdministrationClient.authorizeModelCopy", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.authorize_model_copy": "ClientCustomizations.DocumentIntelligenceAdministrationClient.authorizeModelCopy", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.begin_copy_model_to": "ClientCustomizations.DocumentIntelligenceAdministrationClient.copyModelTo", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.begin_copy_model_to": "ClientCustomizations.DocumentIntelligenceAdministrationClient.copyModelTo", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.get_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getModel", + 
"azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.get_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getModel", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.list_models": "ClientCustomizations.DocumentIntelligenceAdministrationClient.listModels", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.list_models": "ClientCustomizations.DocumentIntelligenceAdministrationClient.listModels", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.delete_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.deleteModel", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.delete_model": "ClientCustomizations.DocumentIntelligenceAdministrationClient.deleteModel", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.get_resource_details": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getResourceDetails", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.get_resource_details": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getResourceDetails", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.get_operation": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getOperation", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.get_operation": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getOperation", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.list_operations": "ClientCustomizations.DocumentIntelligenceAdministrationClient.listOperations", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.list_operations": "ClientCustomizations.DocumentIntelligenceAdministrationClient.listOperations", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.begin_build_classifier": 
"ClientCustomizations.DocumentIntelligenceAdministrationClient.buildClassifier", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.begin_build_classifier": "ClientCustomizations.DocumentIntelligenceAdministrationClient.buildClassifier", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.authorize_classifier_copy": "ClientCustomizations.DocumentIntelligenceAdministrationClient.authorizeClassifierCopy", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.authorize_classifier_copy": "ClientCustomizations.DocumentIntelligenceAdministrationClient.authorizeClassifierCopy", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.begin_copy_classifier_to": "ClientCustomizations.DocumentIntelligenceAdministrationClient.copyClassifierTo", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.begin_copy_classifier_to": "ClientCustomizations.DocumentIntelligenceAdministrationClient.copyClassifierTo", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.get_classifier": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getClassifier", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.get_classifier": "ClientCustomizations.DocumentIntelligenceAdministrationClient.getClassifier", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.list_classifiers": "ClientCustomizations.DocumentIntelligenceAdministrationClient.listClassifiers", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.list_classifiers": "ClientCustomizations.DocumentIntelligenceAdministrationClient.listClassifiers", + "azure.ai.documentintelligence.DocumentIntelligenceAdministrationClient.delete_classifier": "ClientCustomizations.DocumentIntelligenceAdministrationClient.deleteClassifier", + "azure.ai.documentintelligence.aio.DocumentIntelligenceAdministrationClient.delete_classifier": 
"ClientCustomizations.DocumentIntelligenceAdministrationClient.deleteClassifier" + } +} \ No newline at end of file diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_client.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_client.py index a0fc5dce5b25..25df06e4a6da 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_client.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_client.py @@ -20,25 +20,25 @@ DocumentIntelligenceClientConfiguration, ) from ._operations import ( - DocumentIntelligenceAdministrationClientOperationsMixin, - DocumentIntelligenceClientOperationsMixin, + _DocumentIntelligenceAdministrationClientOperationsMixin, + _DocumentIntelligenceClientOperationsMixin, ) -from ._serialization import Deserializer, Serializer +from ._utils.serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential -class DocumentIntelligenceClient(DocumentIntelligenceClientOperationsMixin): +class DocumentIntelligenceClient(_DocumentIntelligenceClientOperationsMixin): """DocumentIntelligenceClient. :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". 
Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no @@ -48,6 +48,7 @@ class DocumentIntelligenceClient(DocumentIntelligenceClientOperationsMixin): def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: _endpoint = "{endpoint}/documentintelligence" self._config = DocumentIntelligenceClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -108,16 +109,16 @@ def __exit__(self, *exc_details: Any) -> None: self._client.__exit__(*exc_details) -class DocumentIntelligenceAdministrationClient(DocumentIntelligenceAdministrationClientOperationsMixin): +class DocumentIntelligenceAdministrationClient(_DocumentIntelligenceAdministrationClientOperationsMixin): """DocumentIntelligenceAdministrationClient. :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no @@ -129,6 +130,7 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCr self._config = DocumentIntelligenceAdministrationClientConfiguration( endpoint=endpoint, credential=credential, **kwargs ) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_configuration.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_configuration.py index 791799f9cf28..e29a557037f7 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_configuration.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_configuration.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -25,17 +26,17 @@ class DocumentIntelligenceClientConfiguration: # pylint: disable=too-many-insta :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-30") + api_version: str = kwargs.pop("api_version", "2025-09-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -79,17 +80,17 @@ class DocumentIntelligenceAdministrationClientConfiguration: # pylint: disable= :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-30") + api_version: str = kwargs.pop("api_version", "2025-09-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py index 63b6bbb81ed8..0ab0b5483608 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/__init__.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -12,16 +13,13 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import DocumentIntelligenceClientOperationsMixin # type: ignore -from ._operations import DocumentIntelligenceAdministrationClientOperationsMixin # type: ignore +from ._operations import _DocumentIntelligenceClientOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _DocumentIntelligenceAdministrationClientOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk -__all__ = [ - "DocumentIntelligenceClientOperationsMixin", - "DocumentIntelligenceAdministrationClientOperationsMixin", -] +__all__ = [] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py index fa4b890381eb..c75df794af6c 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_operations.py @@ -6,12 +6,13 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase import json -import sys -from typing import Any, Callable, Dict, IO, Iterable, Iterator, List, Optional, TypeVar, Union, cast, overload +from typing import Any, Callable, Dict, IO, Iterator, List, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,15 +32,16 @@ from azure.core.utils import case_insensitive_dict from .. import models as _models -from .._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize -from .._serialization import Serializer -from .._vendor import DocumentIntelligenceAdministrationClientMixinABC, DocumentIntelligenceClientMixinABC - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +from .._configuration import ( + DocumentIntelligenceAdministrationClientConfiguration, + DocumentIntelligenceClientConfiguration, +) +from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from .._utils.serialization import Serializer +from .._utils.utils import ClientMixinABC +from .._validation import api_version_validation + +JSON = MutableMapping[str, Any] T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] @@ -63,9 +65,7 @@ def build_document_intelligence_analyze_document_request( # pylint: disable=nam _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels/{modelId}:analyze" path_format_arguments = { @@ -94,7 +94,6 @@ def build_document_intelligence_analyze_document_request( # pylint: disable=nam # Construct headers if content_type is not None: _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -105,7 +104,7 @@ def build_document_intelligence_get_analyze_result_pdf_request( # pylint: disab _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/pdf") # Construct URL @@ -126,13 +125,40 @@ def build_document_intelligence_get_analyze_result_pdf_request( # pylint: disab return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) +def build_document_intelligence_get_analyze_result_png_request( # pylint: disable=name-too-long + model_id: str, result_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) + accept = _headers.pop("Accept", "image/png") + + # Construct URL + _url = "/documentModels/{modelId}/analyzeResults/{resultId}/png" + path_format_arguments = { + "modelId": _SERIALIZER.url("model_id", model_id, "str"), + "resultId": _SERIALIZER.url("result_id", result_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + def build_document_intelligence_get_analyze_result_figure_request( # pylint: disable=name-too-long model_id: str, result_id: str, figure_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "image/png") # Construct URL @@ -157,12 +183,9 @@ def build_document_intelligence_get_analyze_result_figure_request( # pylint: di def build_document_intelligence_delete_analyze_result_request( # pylint: disable=name-too-long model_id: str, result_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels/{modelId}/analyzeResults/{resultId}" path_format_arguments = { @@ -175,10 +198,7 @@ def build_document_intelligence_delete_analyze_result_request( # pylint: disabl # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) 
def build_document_intelligence_analyze_batch_documents_request( # pylint: disable=name-too-long @@ -197,9 +217,7 @@ def build_document_intelligence_analyze_batch_documents_request( # pylint: disa _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels/{modelId}:analyzeBatch" path_format_arguments = { @@ -228,7 +246,6 @@ def build_document_intelligence_analyze_batch_documents_request( # pylint: disa # Construct headers if content_type is not None: _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -239,7 +256,7 @@ def build_document_intelligence_list_analyze_batch_results_request( # pylint: d _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -262,12 +279,9 @@ def build_document_intelligence_list_analyze_batch_results_request( # pylint: d def build_document_intelligence_delete_analyze_batch_result_request( # pylint: disable=name-too-long model_id: str, result_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels/{modelId}/analyzeBatchResults/{resultId}" path_format_arguments = { @@ -280,10 +294,7 @@ def build_document_intelligence_delete_analyze_batch_result_request( # pylint: # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) def build_document_intelligence_get_analyze_batch_result_request( # pylint: disable=name-too-long @@ -292,7 +303,7 @@ def build_document_intelligence_get_analyze_batch_result_request( # pylint: dis _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -325,9 +336,7 @@ def build_document_intelligence_classify_document_request( # pylint: disable=na _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentClassifiers/{classifierId}:analyze" path_format_arguments = { @@ -348,7 +357,6 @@ def build_document_intelligence_classify_document_request( # 
pylint: disable=na # Construct headers if content_type is not None: _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -360,9 +368,7 @@ def build_document_intelligence_administration_build_document_model_request( # _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels:build" @@ -372,7 +378,6 @@ def build_document_intelligence_administration_build_document_model_request( # # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -384,9 +389,7 @@ def build_document_intelligence_administration_compose_model_request( # pylint: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels:compose" @@ -396,7 +399,6 @@ def build_document_intelligence_administration_compose_model_request( # pylint: # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, 
"str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -408,7 +410,7 @@ def build_document_intelligence_administration_authorize_model_copy_request( # _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -432,9 +434,7 @@ def build_document_intelligence_administration_copy_model_to_request( # pylint: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels/{modelId}:copyTo" path_format_arguments = { @@ -449,7 +449,6 @@ def build_document_intelligence_administration_copy_model_to_request( # pylint: # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -460,7 +459,7 @@ def build_document_intelligence_administration_get_model_request( # pylint: dis _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -486,7 +485,7 @@ def build_document_intelligence_administration_list_models_request( # pylint: d _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -507,9 +506,7 @@ def build_document_intelligence_administration_delete_model_request( # pylint: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentModels/{modelId}" path_format_arguments = { @@ -522,7 +519,6 @@ def build_document_intelligence_administration_delete_model_request( # pylint: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) @@ -533,7 +529,7 @@ def build_document_intelligence_administration_get_resource_details_request( # _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -554,7 +550,7 @@ def 
build_document_intelligence_administration_get_operation_request( # pylint: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -580,7 +576,7 @@ def build_document_intelligence_administration_list_operations_request( # pylin _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -602,9 +598,7 @@ def build_document_intelligence_administration_build_classifier_request( # pyli _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentClassifiers:build" @@ -614,7 +608,6 @@ def build_document_intelligence_administration_build_classifier_request( # pyli # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -626,7 +619,7 @@ def build_document_intelligence_administration_authorize_classifier_copy_request _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -650,9 +643,7 @@ def build_document_intelligence_administration_copy_classifier_to_request( # py _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentClassifiers/{classifierId}:copyTo" path_format_arguments = { @@ -667,7 +658,6 @@ def build_document_intelligence_administration_copy_classifier_to_request( # py # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -678,7 +668,7 @@ def build_document_intelligence_administration_get_classifier_request( # pylint _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -704,7 +694,7 @@ def build_document_intelligence_administration_list_classifiers_request( # pyli _headers = case_insensitive_dict(kwargs.pop("headers", {}) 
or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -725,9 +715,7 @@ def build_document_intelligence_administration_delete_classifier_request( # pyl _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-30")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-09-01")) # Construct URL _url = "/documentClassifiers/{classifierId}" path_format_arguments = { @@ -740,12 +728,13 @@ def build_document_intelligence_administration_delete_classifier_request( # pyl _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -class DocumentIntelligenceClientOperationsMixin(DocumentIntelligenceClientMixinABC): # pylint: disable=name-too-long +class _DocumentIntelligenceClientOperationsMixin( + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], DocumentIntelligenceClientConfiguration] +): def _analyze_document_initial( self, @@ -815,7 +804,7 @@ def _analyze_document_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -1069,7 +1058,7 
@@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult")) + deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -1148,7 +1137,76 @@ def get_analyze_result_pdf(self, model_id: str, result_id: str, **kwargs: Any) - except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2025-09-01", + params_added_on={"2025-09-01": ["api_version", "model_id", "result_id", "accept"]}, + api_versions_list=["2025-09-01"], + ) + def get_analyze_result_png(self, model_id: str, result_id: str, **kwargs: Any) -> Iterator[bytes]: + """Gets the generated PNG output from document analysis. + + :param model_id: Unique document model name. Required. + :type model_id: str + :param result_id: Analyze operation result ID. Required. 
+ :type result_id: str + :return: Iterator[bytes] + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_document_intelligence_get_analyze_result_png_request( + model_id=model_id, + result_id=result_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -1217,7 +1275,7 @@ def get_analyze_result_figure( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, 
model=error) response_headers = {} @@ -1278,7 +1336,7 @@ def delete_analyze_result( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if cls: @@ -1352,7 +1410,7 @@ def _analyze_batch_documents_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -1607,7 +1665,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.AnalyzeBatchResult, response.json().get("result")) + deserialized = _deserialize(_models.AnalyzeBatchResult, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -1636,7 +1694,7 @@ def get_long_running_output(pipeline_response): ) @distributed_trace - def list_analyze_batch_results(self, model_id: str, **kwargs: Any) -> Iterable["_models.AnalyzeBatchOperation"]: + def list_analyze_batch_results(self, model_id: str, **kwargs: Any) -> ItemPaged["_models.AnalyzeBatchOperation"]: """List batch document analysis results. :param model_id: Unique document model name. Required. 
@@ -1699,7 +1757,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.AnalyzeBatchOperation], deserialized["value"]) + list_of_elem = _deserialize(List[_models.AnalyzeBatchOperation], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -1715,7 +1773,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -1770,7 +1828,7 @@ def delete_analyze_batch_result( # pylint: disable=inconsistent-return-statemen if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if cls: @@ -1827,7 +1885,7 @@ def get_analyze_batch_result(self, model_id: str, result_id: str, **kwargs: Any) except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1900,7 +1958,7 @@ def _classify_document_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2082,7 +2140,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult")) + deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -2111,8 +2169,8 @@ def get_long_running_output(pipeline_response): ) -class DocumentIntelligenceAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods,name-too-long - DocumentIntelligenceAdministrationClientMixinABC +class _DocumentIntelligenceAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], DocumentIntelligenceAdministrationClientConfiguration] ): def _build_document_model_initial( @@ -2164,7 +2222,7 @@ def _build_document_model_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2271,7 +2329,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result", {})) if cls: return 
cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -2348,7 +2406,7 @@ def _compose_model_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2455,7 +2513,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -2595,7 +2653,7 @@ def authorize_model_copy( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2658,7 +2716,7 @@ def _copy_model_to_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2783,7 +2841,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = 
_deserialize(_models.DocumentModelDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -2859,7 +2917,7 @@ def get_model(self, model_id: str, **kwargs: Any) -> _models.DocumentModelDetail except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2878,7 +2936,7 @@ def get_model(self, model_id: str, **kwargs: Any) -> _models.DocumentModelDetail return deserialized # type: ignore @distributed_trace - def list_models(self, **kwargs: Any) -> Iterable["_models.DocumentModelDetails"]: + def list_models(self, **kwargs: Any) -> ItemPaged["_models.DocumentModelDetails"]: """List all document models. 
:return: An iterator like instance of DocumentModelDetails @@ -2938,7 +2996,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.DocumentModelDetails], deserialized["value"]) + list_of_elem = _deserialize(List[_models.DocumentModelDetails], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -2954,7 +3012,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3004,7 +3062,7 @@ def delete_model(self, model_id: str, **kwargs: Any) -> None: # pylint: disable if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3061,7 +3119,7 @@ def get_resource_details(self, **kwargs: Any) -> _models.DocumentIntelligenceRes except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3123,7 +3181,7 @@ def get_operation(self, operation_id: str, **kwargs: Any) -> 
_models.DocumentInt except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3142,7 +3200,7 @@ def get_operation(self, operation_id: str, **kwargs: Any) -> _models.DocumentInt return deserialized # type: ignore @distributed_trace - def list_operations(self, **kwargs: Any) -> Iterable["_models.DocumentIntelligenceOperationDetails"]: + def list_operations(self, **kwargs: Any) -> ItemPaged["_models.DocumentIntelligenceOperationDetails"]: """Lists all operations. :return: An iterator like instance of DocumentIntelligenceOperationDetails @@ -3202,7 +3260,9 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.DocumentIntelligenceOperationDetails], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.DocumentIntelligenceOperationDetails], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -3218,7 +3278,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3274,7 +3334,7 @@ def _build_classifier_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - 
error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3381,7 +3441,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -3526,7 +3586,7 @@ def authorize_classifier_copy( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3589,7 +3649,7 @@ def _copy_classifier_to_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3715,7 +3775,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -3792,7 
+3852,7 @@ def get_classifier(self, classifier_id: str, **kwargs: Any) -> _models.DocumentC except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3811,7 +3871,7 @@ def get_classifier(self, classifier_id: str, **kwargs: Any) -> _models.DocumentC return deserialized # type: ignore @distributed_trace - def list_classifiers(self, **kwargs: Any) -> Iterable["_models.DocumentClassifierDetails"]: + def list_classifiers(self, **kwargs: Any) -> ItemPaged["_models.DocumentClassifierDetails"]: """List all document classifiers. :return: An iterator like instance of DocumentClassifierDetails @@ -3871,7 +3931,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.DocumentClassifierDetails], deserialized["value"]) + list_of_elem = _deserialize(List[_models.DocumentClassifierDetails], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -3887,7 +3947,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3939,7 +3999,7 @@ def delete_classifier( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py index b5517b65441e..8bcb627aa475 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_operations/_patch.py @@ -1,727 +1,15 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import sys -import re -import io -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, Mapping, cast, overload +from typing import List -from azure.core.pipeline import PipelineResponse -from azure.core.polling import LROPoller, NoPolling, PollingMethod -from azure.core.polling.base_polling import LROBasePolling -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from ._operations import ( - DocumentIntelligenceClientOperationsMixin as GeneratedDIClientOps, - DocumentIntelligenceAdministrationClientOperationsMixin as GeneratedDIAdminClientOps, -) -from .. import models as _models -from .._model_base import _deserialize - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] -PollingReturnType_co = TypeVar("PollingReturnType_co", covariant=True) - - -def _parse_operation_id(operation_location_header): - regex = "[^:]+://[^/]+/documentintelligence/.+/([^?/]+)" - return re.match(regex, operation_location_header).group(1) - - -class AnalyzeDocumentLROPoller(LROPoller[PollingReturnType_co]): - @property - def details(self) -> Mapping[str, Any]: - """Returns metadata associated with the long-running operation. - - :return: Returns metadata associated with the long-running operation. 
- :rtype: Mapping[str, Any] - """ - return { - "operation_id": _parse_operation_id( - self.polling_method()._initial_response.http_response.headers["Operation-Location"] # type: ignore # pylint: disable=protected-access - ), - } - - @classmethod - def from_continuation_token( - cls, polling_method: PollingMethod[PollingReturnType_co], continuation_token: str, **kwargs: Any - ) -> "AnalyzeDocumentLROPoller": - ( - client, - initial_response, - deserialization_callback, - ) = polling_method.from_continuation_token(continuation_token, **kwargs) - - return cls(client, initial_response, deserialization_callback, polling_method) - - -class DocumentIntelligenceAdministrationClientOperationsMixin( - GeneratedDIAdminClientOps -): # pylint: disable=name-too-long - @distributed_trace - def begin_build_classifier( - self, body: Union[_models.BuildDocumentClassifierRequest, JSON, IO[bytes]], **kwargs: Any - ) -> LROPoller[_models.DocumentClassifierDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentClassifierDetails] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = self._build_classifier_initial( - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs, - ) - raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = 
self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json()) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: PollingMethod = cast( - PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) - ) - elif polling is False: - polling_method = cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return LROPoller[_models.DocumentClassifierDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return LROPoller[_models.DocumentClassifierDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace - def begin_build_document_model( - self, body: Union[_models.BuildDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any - ) -> LROPoller[_models.DocumentModelDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = self._build_document_model_initial( - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs, - ) - raw_result.http_response.read() # type: 
ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentModelDetails, response.json()) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: PollingMethod = cast( - PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) - ) - elif polling is False: - polling_method = cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return LROPoller[_models.DocumentModelDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return LROPoller[_models.DocumentModelDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace - def begin_compose_model( - self, body: Union[_models.ComposeDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any - ) -> LROPoller[_models.DocumentModelDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = 
kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = self._compose_model_initial( - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs, - ) - raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentModelDetails, response.json()) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: PollingMethod = cast( - PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) - ) - elif polling is False: - polling_method = cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return LROPoller[_models.DocumentModelDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return LROPoller[_models.DocumentModelDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace - def begin_copy_model_to( - self, model_id: str, body: Union[_models.ModelCopyAuthorization, JSON, IO[bytes]], **kwargs: Any - ) -> LROPoller[_models.DocumentModelDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", 
_headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = self._copy_model_to_initial( - model_id=model_id, - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs, - ) - raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: PollingMethod = cast( - PollingMethod, - LROBasePolling( - lro_delay, - path_format_arguments=path_format_arguments, - lro_options={"final-state-via": "operation-location"}, - **kwargs, - ), - ) - elif polling is False: - polling_method = cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return LROPoller[_models.DocumentModelDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return LROPoller[_models.DocumentModelDetails]( - self._client, raw_result, get_long_running_output, polling_method # 
type: ignore - ) - - @distributed_trace - def begin_copy_classifier_to( - self, - classifier_id: str, - body: Union[_models.ClassifierCopyAuthorization, JSON, IO[bytes]], - **kwargs: Any, - ) -> LROPoller[_models.DocumentClassifierDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentClassifierDetails] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = self._copy_classifier_to_initial( - classifier_id=classifier_id, - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs, - ) - raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result")) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: PollingMethod = cast( - PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, lro_options={"final-state-via": "operation-location"}, **kwargs) - ) - elif polling is False: - polling_method = 
cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return LROPoller[_models.DocumentClassifierDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return LROPoller[_models.DocumentClassifierDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - -class DocumentIntelligenceClientOperationsMixin(GeneratedDIClientOps): # pylint: disable=name-too-long - @overload - def begin_analyze_document( - self, - model_id: str, - body: _models.AnalyzeDocumentRequest, - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Default value is None. - :type body: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. 
- :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. - :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is compatible - with MutableMapping - :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def begin_analyze_document( - self, - model_id: str, - body: JSON, - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. 
- - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Required. - :type body: JSON - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. - :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. 
The AnalyzeResult is compatible - with MutableMapping - :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def begin_analyze_document( - self, - model_id: str, - body: IO[bytes], - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Required. - :type body: IO[bytes] - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. 
- :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is compatible - with MutableMapping - :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def begin_analyze_document( - self, - model_id: str, - body: Union[_models.AnalyzeDocumentRequest, JSON, IO[bytes]], - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - **kwargs: Any, - ) -> AnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Is one of the following types: - AnalyzeDocumentRequest, JSON, IO[bytes] Required. - :type body: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest or JSON or - IO[bytes] - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. 
- :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. - :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :return: An instance of AnalyzeDocumentLROPoller that returns AnalyzeResult. 
The AnalyzeResult is compatible - with MutableMapping - :rtype: AnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - if isinstance(body, (bytes, io.BytesIO, io.BufferedReader)): - content_type = "application/octet-stream" - raw_result = self._analyze_document_initial( - model_id=model_id, - body=body, - pages=pages, - locale=locale, - string_index_type=string_index_type, - features=features, - query_fields=query_fields, - output_content_format=output_content_format, - output=output, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs, - ) - raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult")) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: 
PollingMethod = cast( - PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) - ) - elif polling is False: - polling_method = cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return AnalyzeDocumentLROPoller[_models.AnalyzeResult].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AnalyzeDocumentLROPoller[_models.AnalyzeResult]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace - def begin_classify_document( - self, - classifier_id: str, - body: Union[_models.ClassifyDocumentRequest, JSON, IO[bytes]], - *, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - split: Optional[Union[str, _models.SplitMode]] = None, - pages: Optional[str] = None, - **kwargs: Any, - ) -> LROPoller[_models.AnalyzeResult]: - """Classifies document with document classifier. - - :param classifier_id: Unique document classifier name. Required. - :type classifier_id: str - :param body: Classify request parameters. Is one of the following types: - ClassifyDocumentRequest, JSON, IO[bytes] Required. - :type body: ~azure.ai.documentintelligence.models.ClassifyDocumentRequest or JSON - or IO[bytes] - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage". - Default value is None. - :paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. 
- :paramtype pages: str - :return: An instance of LROPoller that returns AnalyzeResult. The AnalyzeResult is compatible - with MutableMapping - :rtype: ~azure.core.polling.LROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - if isinstance(body, (bytes, io.BytesIO, io.BufferedReader)): - content_type = "application/octet-stream" - return super().begin_classify_document( # type: ignore[arg-type, misc] - classifier_id=classifier_id, - body=body, # type: ignore[arg-type] - content_type=content_type, # type: ignore[arg-type] - string_index_type=string_index_type, - split=split, - pages=pages, - **kwargs, - ) - - @distributed_trace - def get_analyze_batch_result( # type: ignore[override] # pylint: disable=arguments-differ - self, continuation_token: str - ) -> LROPoller[_models.AnalyzeBatchResult]: - """Gets the result of batch document analysis. - - :param str continuation_token: An opaque continuation token. Required. - :return: An instance of LROPoller that returns AnalyzeBatchResult. 
The AnalyzeBatchResult is - compatible with MutableMapping - :rtype: ~azure.core.polling.LROPoller[~azure.ai.documentintelligence.models.AnalyzeBatchResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - return self.begin_analyze_batch_documents( # type: ignore[call-overload] - None, None, continuation_token=continuation_token - ) - - -__all__: List[str] = [ - "DocumentIntelligenceClientOperationsMixin", - "DocumentIntelligenceAdministrationClientOperationsMixin", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py index a77adcdfb601..8bcb627aa475 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_patch.py @@ -1,91 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import Any, List, Union -from azure.core.credentials import AzureKeyCredential, TokenCredential -from ._client import ( - DocumentIntelligenceClient as DIClientGenerated, - DocumentIntelligenceAdministrationClient as DIAClientGenerated, -) -from ._operations._patch import AnalyzeDocumentLROPoller +from typing import List - -class DocumentIntelligenceClient(DIClientGenerated): - """DocumentIntelligenceClient. - - :param endpoint: The Document Intelligence service endpoint. Required. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-11-30". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str - :keyword int polling_interval: Default waiting time between two polls for LRO operations if no - Retry-After header is present. - """ - - def __init__( - self, - endpoint: str, - credential: Union[AzureKeyCredential, TokenCredential], - **kwargs: Any, - ) -> None: - # Patch the default polling interval to be 1s. - polling_interval = kwargs.pop("polling_interval", 1) - super().__init__( - endpoint=endpoint, - credential=credential, - polling_interval=polling_interval, - **kwargs, - ) - - -class DocumentIntelligenceAdministrationClient(DIAClientGenerated): - """DocumentIntelligenceAdministrationClient. - - :param endpoint: The Document Intelligence service endpoint. Required. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. 
- :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-11-30". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str - :keyword int polling_interval: Default waiting time between two polls for LRO operations if no - Retry-After header is present. - """ - - def __init__( - self, - endpoint: str, - credential: Union[AzureKeyCredential, TokenCredential], - **kwargs: Any, - ) -> None: - # Patch the default polling interval to be 1s. - polling_interval = kwargs.pop("polling_interval", 1) - super().__init__( - endpoint=endpoint, - credential=credential, - polling_interval=polling_interval, - **kwargs, - ) - - -__all__: List[str] = [ - "DocumentIntelligenceClient", - "DocumentIntelligenceAdministrationClient", - "AnalyzeDocumentLROPoller", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/__init__.py new file mode 100644 index 000000000000..8026245c2abc --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/model_base.py similarity index 93% rename from sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py rename to sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/model_base.py index 7f73b97b23ef..c62e7e7784af 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_model_base.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/model_base.py @@ -2,8 +2,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=protected-access, broad-except @@ -21,17 +22,14 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET +from collections.abc import MutableMapping from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError from azure.core import CaseInsensitiveEnumMeta from azure.core.pipeline import PipelineResponse from azure.core.serialization import _Null - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping +from azure.core.rest import HttpResponse _LOGGER = logging.getLogger(__name__) @@ -347,7 +345,7 @@ def _get_model(module_name: str, model_name: str): _UNSET = object() -class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object +class _MyMutableMapping(MutableMapping[str, typing.Any]): def __init__(self, data: typing.Dict[str, typing.Any]) -> None: self._data = data @@ -373,50 +371,97 @@ def __ne__(self, other: typing.Any) -> bool: return not self.__eq__(other) def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ return self._data.keys() def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a view on D's values + :rtype: ~typing.ValuesView + """ return self._data.values() def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ return self._data.items() def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. 
Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ try: return self[key] except KeyError: return default @typing.overload - def pop(self, key: str) -> typing.Any: ... + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ @typing.overload - def pop(self, key: str, default: _T) -> _T: ... + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: ... + def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ if default is _UNSET: return self._data.pop(key) return self._data.pop(key, default) def popitem(self) -> typing.Tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ return self._data.popitem() def clear(self) -> None: + """ + Remove all items from D. + """ self._data.clear() - def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ + """ + Updates D from mapping/iterable E and F. + :param any args: Either a mapping object or an iterable of key-value pairs. + """ self._data.update(*args, **kwargs) @typing.overload def setdefault(self, key: str, default: None = None) -> None: ... @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... 
# pylint: disable=signature-differs def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. + :rtype: any + """ if default is _UNSET: return self._data.setdefault(key) return self._data.setdefault(key, default) @@ -597,7 +642,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) # pylint: disable=no-value-for-parameter + return super().__new__(cls) def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -633,7 +678,7 @@ def _deserialize(cls, data, exist_discriminators): discriminator_value = data.find(xml_name).text # pyright: ignore else: discriminator_value = data.get(discriminator._rest_name) - mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member return mapped_cls._deserialize(data, exist_discriminators) def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: @@ -896,13 +941,26 @@ def _deserialize( def _failsafe_deserialize( deserializer: typing.Any, - value: typing.Any, + response: HttpResponse, module: typing.Optional[str] = None, rf: typing.Optional["_RestField"] = None, format: typing.Optional[str] = None, ) -> typing.Any: try: - return _deserialize(deserializer, value, module, rf, format) + return _deserialize(deserializer, response.json(), module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + response: HttpResponse, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, response.text()) except DeserializationError: _LOGGER.warning( "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/serialization.py similarity index 94% rename from sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py rename to sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/serialization.py index b24ab2885450..eb86ea23c965 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_serialization.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/serialization.py @@ -1,28 +1,10 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- -# # Copyright (c) Microsoft Corporation. All rights reserved. 
-# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pyright: reportUnnecessaryTypeIgnoreComment=false @@ -48,9 +30,7 @@ IO, Mapping, Callable, - TypeVar, MutableMapping, - Type, List, ) @@ -61,13 +41,13 @@ import xml.etree.ElementTree as ET import isodate # type: ignore +from typing_extensions import Self from azure.core.exceptions import DeserializationError, SerializationError from azure.core.serialization import NULL as CoreNull _BOM = codecs.BOM_UTF8.decode(encoding="utf-8") -ModelType = TypeVar("ModelType", bound="Model") JSON = MutableMapping[str, Any] @@ -185,73 +165,7 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], except NameError: _long_type = int - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0. - - :param datetime.datetime dt: The datetime - :returns: The offset - :rtype: datetime.timedelta - """ - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation. - - :param datetime.datetime dt: The datetime - :returns: The timestamp representation - :rtype: str - """ - return "Z" - - def dst(self, dt): - """No daylight saving for UTC. - - :param datetime.datetime dt: The datetime - :returns: The daylight saving time - :rtype: datetime.timedelta - """ - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset # type: ignore -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. 
- Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset) -> None: - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc -except ImportError: - TZ_UTC = UTC() # type: ignore +TZ_UTC = datetime.timezone.utc _FLATTEN = re.compile(r"(? ModelType: + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: """Parse a str using the RestAPI syntax and return a model. :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType + :raises DeserializationError: if something went wrong + :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) return deserializer(cls.__name__, data, content_type=content_type) # type: ignore @classmethod def from_dict( - cls: Type[ModelType], + cls, data: Any, key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, content_type: Optional[str] = None, - ) -> ModelType: + ) -> Self: """Parse a dict using given key extractor return a model. By default consider key @@ -479,8 +393,8 @@ def from_dict( :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. 
:returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType + :raises DeserializationError: if something went wrong + :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) deserializer.key_extractors = ( # type: ignore @@ -626,7 +540,7 @@ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, to :param object target_obj: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, dict - :raises: SerializationError if serialization fails. + :raises SerializationError: if serialization fails. :returns: The serialized data. """ key_transformer = kwargs.get("key_transformer", self.key_transformer) @@ -736,8 +650,8 @@ def body(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized request body """ @@ -781,8 +695,8 @@ def url(self, name, data, data_type, **kwargs): :param str data_type: The type to be serialized from. :rtype: str :returns: The serialized URL path - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None """ try: output = self.serialize_data(data, data_type, **kwargs) @@ -805,8 +719,8 @@ def query(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None :returns: The serialized query parameter """ try: @@ -835,8 +749,8 @@ def header(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized header """ try: @@ -855,9 +769,9 @@ def serialize_data(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. :returns: The serialized data. :rtype: str, int, float, bool, dict, list """ @@ -1192,7 +1106,7 @@ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: TypeError if format invalid. + :raises TypeError: if format invalid. :return: serialized rfc """ try: @@ -1218,7 +1132,7 @@ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: SerializationError if format invalid. + :raises SerializationError: if format invalid. :return: serialized iso """ if isinstance(attr, str): @@ -1251,7 +1165,7 @@ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. 
:rtype: int - :raises: SerializationError if format invalid + :raises SerializationError: if format invalid :return: serialied unix """ if isinstance(attr, int): @@ -1429,7 +1343,7 @@ def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument # Iter and wrapped, should have found one node only (the wrap one) if len(children) != 1: raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( xml_name ) ) @@ -1488,7 +1402,7 @@ def __call__(self, target_obj, response_data, content_type=None): :param str target_obj: Target data type to deserialize to. :param requests.Response response_data: REST response object. :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1502,7 +1416,7 @@ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return :param str target_obj: Target data type to deserialize to. :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1717,7 +1631,7 @@ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return- :param str data: The response string to be deserialized. :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. 
:rtype: object """ @@ -1799,7 +1713,7 @@ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return :param dict attr: Dictionary to be deserialized. :return: Deserialized object. :rtype: dict - :raises: TypeError if non-builtin datatype encountered. + :raises TypeError: if non-builtin datatype encountered. """ if attr is None: return None @@ -1845,7 +1759,7 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return :param str data_type: deserialization data type. :return: Deserialized basic type. :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. + :raises TypeError: if string format is not valid. """ # If we're here, data is supposed to be a basic type. # If it's still an XML node, take the text @@ -1936,7 +1850,7 @@ def deserialize_bytearray(attr): :param str attr: response string to be deserialized. :return: Deserialized bytearray :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1949,7 +1863,7 @@ def deserialize_base64(attr): :param str attr: response string to be deserialized. :return: Deserialized base64 string :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1964,7 +1878,7 @@ def deserialize_decimal(attr): :param str attr: response string to be deserialized. :return: Deserialized decimal - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. :rtype: decimal """ if isinstance(attr, ET.Element): @@ -1982,7 +1896,7 @@ def deserialize_long(attr): :param str attr: response string to be deserialized. :return: Deserialized int :rtype: long or int - :raises: ValueError if string format invalid. + :raises ValueError: if string format invalid. 
""" if isinstance(attr, ET.Element): attr = attr.text @@ -1995,7 +1909,7 @@ def deserialize_duration(attr): :param str attr: response string to be deserialized. :return: Deserialized duration :rtype: TimeDelta - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2013,7 +1927,7 @@ def deserialize_date(attr): :param str attr: response string to be deserialized. :return: Deserialized date :rtype: Date - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2029,7 +1943,7 @@ def deserialize_time(attr): :param str attr: response string to be deserialized. :return: Deserialized time :rtype: datetime.time - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2044,14 +1958,14 @@ def deserialize_rfc(attr): :param str attr: response string to be deserialized. :return: Deserialized RFC datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text try: parsed_date = email.utils.parsedate_tz(attr) # type: ignore date_obj = datetime.datetime( - *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) ) if not date_obj.tzinfo: date_obj = date_obj.astimezone(tz=TZ_UTC) @@ -2067,7 +1981,7 @@ def deserialize_iso(attr): :param str attr: response string to be deserialized. :return: Deserialized ISO datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. 
""" if isinstance(attr, ET.Element): attr = attr.text @@ -2105,7 +2019,7 @@ def deserialize_unix(attr): :param int attr: Object to be serialized. :return: Deserialized datetime :rtype: Datetime - :raises: DeserializationError if format invalid + :raises DeserializationError: if format invalid """ if isinstance(attr, ET.Element): attr = int(attr.text) # type: ignore diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/utils.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/utils.py new file mode 100644 index 000000000000..35c9c836f85f --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_utils/utils.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Generic, TYPE_CHECKING, TypeVar + +if TYPE_CHECKING: + from .serialization import Deserializer, Serializer + + +TClient = TypeVar("TClient") +TConfig = TypeVar("TConfig") + + +class ClientMixinABC(ABC, Generic[TClient, TConfig]): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: TClient + _config: TConfig + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py index 752b2822f9d3..f5af3a4eb8a2 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_validation.py @@ -10,6 +10,22 @@ def api_version_validation(**kwargs): params_added_on = kwargs.pop("params_added_on", {}) method_added_on = kwargs.pop("method_added_on", "") + api_versions_list = kwargs.pop("api_versions_list", []) + + def _index_with_default(value: str, default: int = -1) -> int: + """Get the index of value in lst, or return default if not found. + + :param value: The value to search for in the api_versions_list. + :type value: str + :param default: The default value to return if the value is not found. + :type default: int + :return: The index of the value in the list, or the default value if not found. + :rtype: int + """ + try: + return api_versions_list.index(value) + except ValueError: + return default def decorator(func): @functools.wraps(func) @@ -21,7 +37,7 @@ def wrapper(*args, **kwargs): except AttributeError: return func(*args, **kwargs) - if method_added_on > client_api_version: + if _index_with_default(method_added_on) > _index_with_default(client_api_version): raise ValueError( f"'{func.__name__}' is not available in API version " f"{client_api_version}. Pass service API version {method_added_on} or newer to your client." 
@@ -31,7 +47,7 @@ def wrapper(*args, **kwargs): parameter: api_version for api_version, parameters in params_added_on.items() for parameter in parameters - if parameter in kwargs and api_version > client_api_version + if parameter in kwargs and _index_with_default(api_version) > _index_with_default(client_api_version) } if unsupported: raise ValueError( diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_vendor.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_vendor.py deleted file mode 100644 index 1f66ff079df6..000000000000 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_vendor.py +++ /dev/null @@ -1,37 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from abc import ABC -from typing import TYPE_CHECKING - -from ._configuration import ( - DocumentIntelligenceAdministrationClientConfiguration, - DocumentIntelligenceClientConfiguration, -) - -if TYPE_CHECKING: - from azure.core import PipelineClient - - from ._serialization import Deserializer, Serializer - - -class DocumentIntelligenceClientMixinABC(ABC): - """DO NOT use this class. It is for internal typing use only.""" - - _client: "PipelineClient" - _config: DocumentIntelligenceClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" - - -class DocumentIntelligenceAdministrationClientMixinABC(ABC): # pylint: disable=name-too-long - """DO NOT use this class. 
It is for internal typing use only.""" - - _client: "PipelineClient" - _config: DocumentIntelligenceAdministrationClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py index 85180e818ac4..0ec13ea52bbf 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.3" +VERSION = "1.0.0" diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_client.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_client.py index 3fd4e8d23300..ac80a6042ee9 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_client.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_client.py @@ -15,30 +15,30 @@ from azure.core.pipeline import policies from azure.core.rest import AsyncHttpResponse, HttpRequest -from .._serialization import Deserializer, Serializer +from .._utils.serialization import Deserializer, Serializer from ._configuration import ( DocumentIntelligenceAdministrationClientConfiguration, DocumentIntelligenceClientConfiguration, ) from ._operations import ( - DocumentIntelligenceAdministrationClientOperationsMixin, - DocumentIntelligenceClientOperationsMixin, + _DocumentIntelligenceAdministrationClientOperationsMixin, + _DocumentIntelligenceClientOperationsMixin, ) if TYPE_CHECKING: from azure.core.credentials_async import 
AsyncTokenCredential -class DocumentIntelligenceClient(DocumentIntelligenceClientOperationsMixin): +class DocumentIntelligenceClient(_DocumentIntelligenceClientOperationsMixin): """DocumentIntelligenceClient. :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no @@ -50,6 +50,7 @@ def __init__( ) -> None: _endpoint = "{endpoint}/documentintelligence" self._config = DocumentIntelligenceClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -112,16 +113,16 @@ async def __aexit__(self, *exc_details: Any) -> None: await self._client.__aexit__(*exc_details) -class DocumentIntelligenceAdministrationClient(DocumentIntelligenceAdministrationClientOperationsMixin): +class DocumentIntelligenceAdministrationClient(_DocumentIntelligenceAdministrationClientOperationsMixin): """DocumentIntelligenceAdministrationClient. :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. 
Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no @@ -135,6 +136,7 @@ def __init__( self._config = DocumentIntelligenceAdministrationClientConfiguration( endpoint=endpoint, credential=credential, **kwargs ) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_configuration.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_configuration.py index 658a80cfa20c..497ce23a6834 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_configuration.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_configuration.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -25,11 +26,11 @@ class DocumentIntelligenceClientConfiguration: # pylint: disable=too-many-insta :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. 
+ :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -37,7 +38,7 @@ class DocumentIntelligenceClientConfiguration: # pylint: disable=too-many-insta def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-30") + api_version: str = kwargs.pop("api_version", "2025-09-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -81,11 +82,11 @@ class DocumentIntelligenceAdministrationClientConfiguration: # pylint: disable= :param endpoint: The Document Intelligence service endpoint. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2024-11-30". + :keyword api_version: The API version to use for this operation. Default value is "2025-09-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -93,7 +94,7 @@ class DocumentIntelligenceAdministrationClientConfiguration: # pylint: disable= def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-30") + api_version: str = kwargs.pop("api_version", "2025-09-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py index 63b6bbb81ed8..0ab0b5483608 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/__init__.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -12,16 +13,13 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import DocumentIntelligenceClientOperationsMixin # type: ignore -from ._operations import DocumentIntelligenceAdministrationClientOperationsMixin # type: ignore +from ._operations import _DocumentIntelligenceClientOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _DocumentIntelligenceAdministrationClientOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk -__all__ = [ - "DocumentIntelligenceClientOperationsMixin", - "DocumentIntelligenceAdministrationClientOperationsMixin", -] +__all__ = [] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py index 334e48906413..0971d8bc45b2 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,12 +6,13 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase import json -import sys -from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload +from typing import Any, AsyncIterator, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,7 +33,6 @@ from azure.core.utils import case_insensitive_dict from ... import models as _models -from ..._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._operations._operations import ( build_document_intelligence_administration_authorize_classifier_copy_request, build_document_intelligence_administration_authorize_model_copy_request, @@ -58,20 +58,25 @@ build_document_intelligence_get_analyze_batch_result_request, build_document_intelligence_get_analyze_result_figure_request, build_document_intelligence_get_analyze_result_pdf_request, + build_document_intelligence_get_analyze_result_png_request, build_document_intelligence_list_analyze_batch_results_request, ) -from .._vendor import DocumentIntelligenceAdministrationClientMixinABC, DocumentIntelligenceClientMixinABC +from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._utils.utils import ClientMixinABC +from ..._validation import api_version_validation +from .._configuration import ( + DocumentIntelligenceAdministrationClientConfiguration, + DocumentIntelligenceClientConfiguration, +) -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +JSON = MutableMapping[str, Any] T = TypeVar("T") ClsType = 
Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class DocumentIntelligenceClientOperationsMixin(DocumentIntelligenceClientMixinABC): # pylint: disable=name-too-long +class _DocumentIntelligenceClientOperationsMixin( + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], DocumentIntelligenceClientConfiguration] +): async def _analyze_document_initial( self, @@ -141,7 +146,7 @@ async def _analyze_document_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -395,7 +400,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult")) + deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -475,7 +480,76 @@ async def get_analyze_result_pdf(self, model_id: str, result_id: str, **kwargs: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) 
# type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-09-01", + params_added_on={"2025-09-01": ["api_version", "model_id", "result_id", "accept"]}, + api_versions_list=["2025-09-01"], + ) + async def get_analyze_result_png(self, model_id: str, result_id: str, **kwargs: Any) -> AsyncIterator[bytes]: + """Gets the generated searchable PNG output from document analysis. + + :param model_id: Unique document model name. Required. + :type model_id: str + :param result_id: Analyze operation result ID. Required. + :type result_id: str + :return: AsyncIterator[bytes] + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_document_intelligence_get_analyze_result_png_request( + model_id=model_id, + result_id=result_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -544,7 +618,7 @@ async def get_analyze_result_figure( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -603,7 +677,7 @@ async def delete_analyze_result(self, model_id: str, result_id: str, **kwargs: A if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if cls: @@ -677,7 +751,7 @@ async def _analyze_batch_documents_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -936,7 +1010,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.AnalyzeBatchResult, response.json().get("result")) + deserialized = _deserialize(_models.AnalyzeBatchResult, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: 
ignore return deserialized @@ -968,7 +1042,7 @@ def get_long_running_output(pipeline_response): @distributed_trace def list_analyze_batch_results( self, model_id: str, **kwargs: Any - ) -> AsyncIterable["_models.AnalyzeBatchOperation"]: + ) -> AsyncItemPaged["_models.AnalyzeBatchOperation"]: """List batch document analysis results. :param model_id: Unique document model name. Required. @@ -1031,7 +1105,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.AnalyzeBatchOperation], deserialized["value"]) + list_of_elem = _deserialize(List[_models.AnalyzeBatchOperation], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -1047,7 +1121,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -1100,7 +1174,7 @@ async def delete_analyze_batch_result(self, model_id: str, result_id: str, **kwa if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if cls: @@ -1159,7 +1233,7 @@ async def get_analyze_batch_result( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
_failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1232,7 +1306,7 @@ async def _classify_document_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -1414,7 +1488,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult")) + deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -1444,8 +1518,10 @@ def get_long_running_output(pipeline_response): ) -class DocumentIntelligenceAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods,name-too-long - DocumentIntelligenceAdministrationClientMixinABC +class _DocumentIntelligenceAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[ + AsyncPipelineClient[HttpRequest, AsyncHttpResponse], DocumentIntelligenceAdministrationClientConfiguration + ] ): async def _build_document_model_initial( @@ -1497,7 +1573,7 @@ async def _build_document_model_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, 
response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -1604,7 +1680,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -1682,7 +1758,7 @@ async def _compose_model_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -1789,7 +1865,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -1930,7 +2006,7 @@ async def authorize_model_copy( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1993,7 +2069,7 @@ async def _copy_model_to_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - 
error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2118,7 +2194,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -2195,7 +2271,7 @@ async def get_model(self, model_id: str, **kwargs: Any) -> _models.DocumentModel except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2214,7 +2290,7 @@ async def get_model(self, model_id: str, **kwargs: Any) -> _models.DocumentModel return deserialized # type: ignore @distributed_trace - def list_models(self, **kwargs: Any) -> AsyncIterable["_models.DocumentModelDetails"]: + def list_models(self, **kwargs: Any) -> AsyncItemPaged["_models.DocumentModelDetails"]: """List all document models. 
:return: An iterator like instance of DocumentModelDetails @@ -2274,7 +2350,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.DocumentModelDetails], deserialized["value"]) + list_of_elem = _deserialize(List[_models.DocumentModelDetails], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -2290,7 +2366,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2340,7 +2416,7 @@ async def delete_model(self, model_id: str, **kwargs: Any) -> None: if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2397,7 +2473,7 @@ async def get_resource_details(self, **kwargs: Any) -> _models.DocumentIntellige except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2459,7 +2535,7 @@ async def get_operation(self, operation_id: str, **kwargs: 
Any) -> _models.Docum except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2478,8 +2554,7 @@ async def get_operation(self, operation_id: str, **kwargs: Any) -> _models.Docum return deserialized # type: ignore @distributed_trace - def list_operations(self, **kwargs: Any) -> AsyncIterable["_models.DocumentIntelligenceOperationDetails"]: - # pylint: disable=line-too-long + def list_operations(self, **kwargs: Any) -> AsyncItemPaged["_models.DocumentIntelligenceOperationDetails"]: """Lists all operations. :return: An iterator like instance of DocumentIntelligenceOperationDetails @@ -2539,7 +2614,9 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.DocumentIntelligenceOperationDetails], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.DocumentIntelligenceOperationDetails], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -2555,7 +2632,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2611,7 +2688,7 @@ async def _build_classifier_initial( except (StreamConsumedError, StreamClosedError): pass 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2718,7 +2795,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result", {})) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -2864,7 +2941,7 @@ async def authorize_classifier_copy( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2927,7 +3004,7 @@ async def _copy_classifier_to_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3053,7 +3130,7 @@ def get_long_running_output(pipeline_response): "str", response.headers.get("Operation-Location") ) - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result")) + deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result", {})) if cls: return 
cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized @@ -3131,7 +3208,7 @@ async def get_classifier(self, classifier_id: str, **kwargs: Any) -> _models.Doc except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3150,7 +3227,7 @@ async def get_classifier(self, classifier_id: str, **kwargs: Any) -> _models.Doc return deserialized # type: ignore @distributed_trace - def list_classifiers(self, **kwargs: Any) -> AsyncIterable["_models.DocumentClassifierDetails"]: + def list_classifiers(self, **kwargs: Any) -> AsyncItemPaged["_models.DocumentClassifierDetails"]: """List all document classifiers. :return: An iterator like instance of DocumentClassifierDetails @@ -3210,7 +3287,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.DocumentClassifierDetails], deserialized["value"]) + list_of_elem = _deserialize(List[_models.DocumentClassifierDetails], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -3226,7 +3303,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3276,7 +3353,7 @@ async def 
delete_classifier(self, classifier_id: str, **kwargs: Any) -> None: if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response.json()) + error = _failsafe_deserialize(_models.DocumentIntelligenceErrorResponse, response) raise HttpResponseError(response=response, model=error) response_headers = {} diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py index 8158f14847f5..8bcb627aa475 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_operations/_patch.py @@ -1,709 +1,15 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import sys -import io -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, Mapping, cast, overload +from typing import List -from azure.core.pipeline import PipelineResponse -from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod -from azure.core.polling.async_base_polling import AsyncLROBasePolling -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ._operations import ( - DocumentIntelligenceClientOperationsMixin as GeneratedDIClientOps, - DocumentIntelligenceAdministrationClientOperationsMixin as GeneratedDIAdminClientOps, -) -from ... import models as _models -from ..._model_base import _deserialize -from ..._operations._patch import PollingReturnType_co, _parse_operation_id - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class AsyncAnalyzeDocumentLROPoller(AsyncLROPoller[PollingReturnType_co]): - @property - def details(self) -> Mapping[str, Any]: - """Returns metadata associated with the long-running operation. - - :return: Returns metadata associated with the long-running operation. 
- :rtype: Mapping[str, Any] - """ - return { - "operation_id": _parse_operation_id( - self.polling_method()._initial_response.http_response.headers["Operation-Location"] # type: ignore # pylint: disable=protected-access - ), - } - - @classmethod - def from_continuation_token( - cls, polling_method: AsyncPollingMethod[PollingReturnType_co], continuation_token: str, **kwargs: Any - ) -> "AsyncAnalyzeDocumentLROPoller": - ( - client, - initial_response, - deserialization_callback, - ) = polling_method.from_continuation_token(continuation_token, **kwargs) - - return cls(client, initial_response, deserialization_callback, polling_method) - - -class DocumentIntelligenceAdministrationClientOperationsMixin( - GeneratedDIAdminClientOps -): # pylint: disable=name-too-long - @distributed_trace_async - async def begin_build_classifier( # type: ignore[override] - self, body: Union[_models.BuildDocumentClassifierRequest, JSON, IO[bytes]], **kwargs: Any - ) -> AsyncLROPoller[_models.DocumentClassifierDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentClassifierDetails] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = await self._build_classifier_initial( - body=body, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs - ) - await raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", 
response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json()) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncLROPoller[_models.DocumentClassifierDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncLROPoller[_models.DocumentClassifierDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace_async - async def begin_build_document_model( # type: ignore[override] - self, body: Union[_models.BuildDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any - ) -> AsyncLROPoller[_models.DocumentModelDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = await 
self._build_document_model_initial( - body=body, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs - ) - await raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentModelDetails, response.json()) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncLROPoller[_models.DocumentModelDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncLROPoller[_models.DocumentModelDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace_async - async def begin_compose_model( # type: ignore[override] - self, body: Union[_models.ComposeDocumentModelRequest, JSON, IO[bytes]], **kwargs: Any - ) -> AsyncLROPoller[_models.DocumentModelDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", 
_headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = await self._compose_model_initial( - body=body, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs - ) - await raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentModelDetails, response.json()) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncLROPoller[_models.DocumentModelDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncLROPoller[_models.DocumentModelDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace_async - async def 
begin_copy_model_to( # type: ignore[override] - self, model_id: str, body: Union[_models.ModelCopyAuthorization, JSON, IO[bytes]], **kwargs: Any - ) -> AsyncLROPoller[_models.DocumentModelDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentModelDetails] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = await self._copy_model_to_initial( - model_id=model_id, - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs - ) - await raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentModelDetails, response.json().get("result")) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling( - lro_delay, - path_format_arguments=path_format_arguments, - lro_options={"final-state-via": "operation-location"}, - **kwargs - ), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, 
AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncLROPoller[_models.DocumentModelDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncLROPoller[_models.DocumentModelDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace_async - async def begin_copy_classifier_to( # type: ignore[override] - self, classifier_id: str, body: Union[_models.ClassifierCopyAuthorization, JSON, IO[bytes]], **kwargs: Any - ) -> AsyncLROPoller[_models.DocumentClassifierDetails]: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DocumentClassifierDetails] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = await self._copy_classifier_to_initial( - classifier_id=classifier_id, - body=body, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs - ) - await raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.DocumentClassifierDetails, response.json().get("result")) - if cls: - return cls(pipeline_response, 
deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, lro_options={"final-state-via": "operation-location"}, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncLROPoller[_models.DocumentClassifierDetails].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncLROPoller[_models.DocumentClassifierDetails]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - -class DocumentIntelligenceClientOperationsMixin(GeneratedDIClientOps): # pylint: disable=name-too-long - @overload - async def begin_analyze_document( - self, - model_id: str, - body: _models.AnalyzeDocumentRequest, - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Required. 
- :type body: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. - :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. 
The AnalyzeResult is - compatible with MutableMapping - :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def begin_analyze_document( - self, - model_id: str, - body: JSON, - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Required. - :type body: JSON - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. 
- :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is - compatible with MutableMapping - :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def begin_analyze_document( - self, - model_id: str, - body: IO[bytes], - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Required. - :type body: IO[bytes] - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. 
Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. - :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. - :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. 
The AnalyzeResult is - compatible with MutableMapping - :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def begin_analyze_document( # type: ignore[override] - self, - model_id: str, - body: Union[_models.AnalyzeDocumentRequest, JSON, IO[bytes]], - *, - pages: Optional[str] = None, - locale: Optional[str] = None, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - features: Optional[List[Union[str, _models.DocumentAnalysisFeature]]] = None, - query_fields: Optional[List[str]] = None, - output_content_format: Optional[Union[str, _models.DocumentContentFormat]] = None, - output: Optional[List[Union[str, _models.AnalyzeOutputOption]]] = None, - **kwargs: Any - ) -> AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]: - """Analyzes document with document model. - - :param model_id: Unique document model name. Required. - :type model_id: str - :param body: Analyze request parameters. Is one of the following types: - AnalyzeDocumentRequest, JSON, IO[bytes]. Required. - :type body: ~azure.ai.documentintelligence.models.AnalyzeDocumentRequest or JSON or - IO[bytes] - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :keyword locale: Locale hint for text recognition and document analysis. Value may contain - only - the language code (ex. "en", "fr") or BCP 47 language tag (ex. "en-US"). Default value is - None. - :paramtype locale: str - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword features: List of optional analysis features. Default value is None. 
- :paramtype features: list[str or ~azure.ai.documentintelligence.models.DocumentAnalysisFeature] - :keyword query_fields: List of additional fields to extract. Ex. "NumberOfGuests,StoreNumber". - Default value is None. - :paramtype query_fields: list[str] - :keyword output_content_format: Format of the analyze result top-level content. Known values - are: "text" and "markdown". Default value is None. - :paramtype output_content_format: str or - ~azure.ai.documentintelligence.models.DocumentContentFormat - :keyword output: Additional outputs to generate during analysis. Default value is None. - :paramtype output: list[str or ~azure.ai.documentintelligence.models.AnalyzeOutputOption] - :return: An instance of AsyncAnalyzeDocumentLROPoller that returns AnalyzeResult. The AnalyzeResult is - compatible with MutableMapping - :rtype: AsyncAnalyzeDocumentLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - if isinstance(body, (bytes, io.BytesIO, io.BufferedReader)): - content_type = "application/octet-stream" - raw_result = await self._analyze_document_initial( - model_id=model_id, - body=body, - pages=pages, - locale=locale, - string_index_type=string_index_type, - features=features, - query_fields=query_fields, - output_content_format=output_content_format, - output=output, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs - ) - await 
raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.AnalyzeResult, response.json().get("analyzeResult")) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncAnalyzeDocumentLROPoller[_models.AnalyzeResult]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @distributed_trace_async - async def begin_classify_document( # type: ignore[override] - self, - classifier_id: str, - body: Union[_models.ClassifyDocumentRequest, JSON, IO[bytes]], - *, - string_index_type: Optional[Union[str, _models.StringIndexType]] = None, - split: Optional[Union[str, _models.SplitMode]] = None, - pages: Optional[str] = None, - **kwargs: Any - ) -> AsyncLROPoller[_models.AnalyzeResult]: - """Classifies document with document classifier. - - :param classifier_id: Unique document classifier name. 
Required. - :type classifier_id: str - :param body: Classify request parameters. Is one of the following types: - ClassifyDocumentRequest, JSON, IO[bytes] Required. - :type body: ~azure.ai.documentintelligence.models.ClassifyDocumentRequest or JSON - or IO[bytes] - :keyword string_index_type: Method used to compute string offset and length. Known values are: - "textElements", "unicodeCodePoint", and "utf16CodeUnit". Default value is None. - :paramtype string_index_type: str or ~azure.ai.documentintelligence.models.StringIndexType - :keyword split: Document splitting mode. Known values are: "auto", "none", and "perPage". - Default value is None. - :paramtype split: str or ~azure.ai.documentintelligence.models.SplitMode - :keyword pages: 1-based page numbers to analyze. Ex. "1-3,5,7-9". Default value is None. - :paramtype pages: str - :return: An instance of AsyncLROPoller that returns AnalyzeResult. The AnalyzeResult is - compatible with MutableMapping - :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.documentintelligence.models.AnalyzeResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - if isinstance(body, (bytes, io.BytesIO, io.BufferedReader)): - content_type = "application/octet-stream" - return await super().begin_classify_document( # type: ignore[arg-type, misc] - classifier_id=classifier_id, - body=body, # type: ignore[arg-type] - content_type=content_type, # type: ignore[arg-type] - string_index_type=string_index_type, - split=split, - pages=pages, - **kwargs - ) - - @distributed_trace_async - async def get_analyze_batch_result( # type: ignore[override] # pylint: disable=arguments-differ - self, continuation_token: str - ) -> AsyncLROPoller[_models.AnalyzeBatchResult]: - """Gets the result of batch document analysis. 
- - :param str continuation_token: An opaque continuation token. Required. - :return: An instance of AsyncLROPoller that returns AnalyzeBatchResult. The AnalyzeBatchResult - is compatible with MutableMapping - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.ai.documentintelligence.models.AnalyzeBatchResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - return await self.begin_analyze_batch_documents( # type: ignore[call-overload] - None, None, continuation_token=continuation_token - ) - - -__all__: List[str] = [ - "DocumentIntelligenceClientOperationsMixin", - "DocumentIntelligenceAdministrationClientOperationsMixin", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py index 471c1967baa0..8bcb627aa475 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_patch.py @@ -1,90 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import Any, List, Union +from typing import List -from azure.core.credentials import AzureKeyCredential -from azure.core.credentials_async import AsyncTokenCredential - -from ._client import ( - DocumentIntelligenceClient as DIClientGenerated, - DocumentIntelligenceAdministrationClient as DIAClientGenerated, -) -from ..aio._operations._patch import AsyncAnalyzeDocumentLROPoller - - -class DocumentIntelligenceClient(DIClientGenerated): - """DocumentIntelligenceClient. - - :param endpoint: The Document Intelligence service endpoint. Required. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-11-30". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - credential: Union[AzureKeyCredential, AsyncTokenCredential], - **kwargs: Any, - ) -> None: - # Patch the default polling interval to be 1s. - polling_interval = kwargs.pop("polling_interval", 1) - super().__init__( - endpoint=endpoint, - credential=credential, - polling_interval=polling_interval, - **kwargs, - ) - - -class DocumentIntelligenceAdministrationClient(DIAClientGenerated): - """DocumentIntelligenceAdministrationClient. - - :param endpoint: The Document Intelligence service endpoint. Required. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. 
- :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-11-30". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - credential: Union[AzureKeyCredential, AsyncTokenCredential], - **kwargs: Any, - ) -> None: - # Patch the default polling interval to be 1s. - polling_interval = kwargs.pop("polling_interval", 1) - super().__init__( - endpoint=endpoint, - credential=credential, - polling_interval=polling_interval, - **kwargs, - ) - - -__all__: List[str] = [ - "DocumentIntelligenceClient", - "DocumentIntelligenceAdministrationClient", - "AsyncAnalyzeDocumentLROPoller", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_vendor.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_vendor.py deleted file mode 100644 index 9fec3b22f684..000000000000 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/aio/_vendor.py +++ /dev/null @@ -1,37 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from abc import ABC -from typing import TYPE_CHECKING - -from ._configuration import ( - DocumentIntelligenceAdministrationClientConfiguration, - DocumentIntelligenceClientConfiguration, -) - -if TYPE_CHECKING: - from azure.core import AsyncPipelineClient - - from .._serialization import Deserializer, Serializer - - -class DocumentIntelligenceClientMixinABC(ABC): - """DO NOT use this class. It is for internal typing use only.""" - - _client: "AsyncPipelineClient" - _config: DocumentIntelligenceClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" - - -class DocumentIntelligenceAdministrationClientMixinABC(ABC): # pylint: disable=name-too-long - """DO NOT use this class. It is for internal typing use only.""" - - _client: "AsyncPipelineClient" - _config: DocumentIntelligenceAdministrationClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py index b19eff689606..1c86ed96f17b 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_models.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -11,15 +11,14 @@ import datetime from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload -from .. 
import _model_base -from .._model_base import rest_discriminator, rest_field +from .._utils.model_base import Model as _Model, rest_discriminator, rest_field from ._enums import OperationKind if TYPE_CHECKING: from .. import models as _models -class AddressValue(_model_base.Model): +class AddressValue(_Model): """Address field value. :ivar house_number: House or building number. @@ -55,34 +54,46 @@ class AddressValue(_model_base.Model): :vartype level: str """ - house_number: Optional[str] = rest_field(name="houseNumber") + house_number: Optional[str] = rest_field( + name="houseNumber", visibility=["read", "create", "update", "delete", "query"] + ) """House or building number.""" - po_box: Optional[str] = rest_field(name="poBox") + po_box: Optional[str] = rest_field(name="poBox", visibility=["read", "create", "update", "delete", "query"]) """Post office box number.""" - road: Optional[str] = rest_field() + road: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Street name.""" - city: Optional[str] = rest_field() + city: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Name of city, town, village, etc.""" - state: Optional[str] = rest_field() + state: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """First-level administrative division.""" - postal_code: Optional[str] = rest_field(name="postalCode") + postal_code: Optional[str] = rest_field( + name="postalCode", visibility=["read", "create", "update", "delete", "query"] + ) """Postal code used for mail sorting.""" - country_region: Optional[str] = rest_field(name="countryRegion") + country_region: Optional[str] = rest_field( + name="countryRegion", visibility=["read", "create", "update", "delete", "query"] + ) """Country/region.""" - street_address: Optional[str] = rest_field(name="streetAddress") + street_address: Optional[str] = rest_field( + name="streetAddress", visibility=["read", "create", 
"update", "delete", "query"] + ) """Street-level address, excluding city, state, countryRegion, and postalCode.""" - unit: Optional[str] = rest_field() + unit: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Apartment or office number.""" - city_district: Optional[str] = rest_field(name="cityDistrict") + city_district: Optional[str] = rest_field( + name="cityDistrict", visibility=["read", "create", "update", "delete", "query"] + ) """Districts or boroughs within a city, such as Brooklyn in New York City or City of Westminster in London.""" - state_district: Optional[str] = rest_field(name="stateDistrict") + state_district: Optional[str] = rest_field( + name="stateDistrict", visibility=["read", "create", "update", "delete", "query"] + ) """Second-level administrative division used in certain locales.""" - suburb: Optional[str] = rest_field() + suburb: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Unofficial neighborhood name, like Chinatown.""" - house: Optional[str] = rest_field() + house: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Build name, such as World Trade Center.""" - level: Optional[str] = rest_field() + level: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Floor number, such as 3F.""" @overload @@ -116,11 +127,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzeBatchDocumentsRequest(_model_base.Model): +class AnalyzeBatchDocumentsRequest(_Model): """Batch document analysis parameters. - All required parameters must be populated in order to send to server. - :ivar azure_blob_source: Azure Blob Storage location containing the batch documents. Either azureBlobSource or azureBlobFileListSource must be specified. 
:vartype azure_blob_source: ~azure.ai.documentintelligence.models.AzureBlobContentSource @@ -138,19 +147,27 @@ class AnalyzeBatchDocumentsRequest(_model_base.Model): :vartype overwrite_existing: bool """ - azure_blob_source: Optional["_models.AzureBlobContentSource"] = rest_field(name="azureBlobSource") + azure_blob_source: Optional["_models.AzureBlobContentSource"] = rest_field( + name="azureBlobSource", visibility=["read", "create", "update", "delete", "query"] + ) """Azure Blob Storage location containing the batch documents. Either azureBlobSource or azureBlobFileListSource must be specified.""" azure_blob_file_list_source: Optional["_models.AzureBlobFileListContentSource"] = rest_field( - name="azureBlobFileListSource" + name="azureBlobFileListSource", visibility=["read", "create", "update", "delete", "query"] ) """Azure Blob Storage file list specifying the batch documents. Either azureBlobSource or azureBlobFileListSource must be specified.""" - result_container_url: str = rest_field(name="resultContainerUrl") + result_container_url: str = rest_field( + name="resultContainerUrl", visibility=["read", "create", "update", "delete", "query"] + ) """Azure Blob Storage container URL where analyze result files will be stored. 
Required.""" - result_prefix: Optional[str] = rest_field(name="resultPrefix") + result_prefix: Optional[str] = rest_field( + name="resultPrefix", visibility=["read", "create", "update", "delete", "query"] + ) """Blob name prefix of result files.""" - overwrite_existing: Optional[bool] = rest_field(name="overwriteExisting") + overwrite_existing: Optional[bool] = rest_field( + name="overwriteExisting", visibility=["read", "create", "update", "delete", "query"] + ) """Overwrite existing analyze result files?.""" @overload @@ -175,10 +192,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzeBatchOperation(_model_base.Model): +class AnalyzeBatchOperation(_Model): """Status and result of the analyze batch operation. - :ivar result_id: Analyze batch operation result ID. :vartype result_id: str :ivar status: Operation status. notStarted, running, succeeded, or failed. Required. Known @@ -197,20 +213,32 @@ class AnalyzeBatchOperation(_model_base.Model): :vartype result: ~azure.ai.documentintelligence.models.AnalyzeBatchResult """ - result_id: Optional[str] = rest_field(name="resultId") + result_id: Optional[str] = rest_field(name="resultId", visibility=["read", "create", "update", "delete", "query"]) """Analyze batch operation result ID.""" - status: Union[str, "_models.DocumentIntelligenceOperationStatus"] = rest_field() + status: Union[str, "_models.DocumentIntelligenceOperationStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation status. notStarted, running, succeeded, or failed. Required. 
Known values are: \"notStarted\", \"running\", \"failed\", \"succeeded\", \"canceled\", and \"skipped\".""" - created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339") + created_date_time: datetime.datetime = rest_field( + name="createdDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date and time (UTC) when the operation was submitted. Required.""" - last_updated_date_time: datetime.datetime = rest_field(name="lastUpdatedDateTime", format="rfc3339") + last_updated_date_time: datetime.datetime = rest_field( + name="lastUpdatedDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date and time (UTC) when the status was last updated. Required.""" - percent_completed: Optional[int] = rest_field(name="percentCompleted") + percent_completed: Optional[int] = rest_field( + name="percentCompleted", visibility=["read", "create", "update", "delete", "query"] + ) """Operation progress (0-100).""" - error: Optional["_models.DocumentIntelligenceError"] = rest_field() + error: Optional["_models.DocumentIntelligenceError"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Encountered error during batch document analysis.""" - result: Optional["_models.AnalyzeBatchResult"] = rest_field() + result: Optional["_models.AnalyzeBatchResult"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Batch document analysis result.""" @overload @@ -237,10 +265,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzeBatchOperationDetail(_model_base.Model): +class AnalyzeBatchOperationDetail(_Model): """Operation detail for a document in a batch analysis. - :ivar status: Analyze status. succeeded, failed, or skipped. Required. Known values are: "notStarted", "running", "failed", "succeeded", "canceled", and "skipped". 
:vartype status: str or @@ -253,14 +280,18 @@ class AnalyzeBatchOperationDetail(_model_base.Model): :vartype error: ~azure.ai.documentintelligence.models.DocumentIntelligenceError """ - status: Union[str, "_models.DocumentIntelligenceOperationStatus"] = rest_field() + status: Union[str, "_models.DocumentIntelligenceOperationStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Analyze status. succeeded, failed, or skipped. Required. Known values are: \"notStarted\", \"running\", \"failed\", \"succeeded\", \"canceled\", and \"skipped\".""" - source_url: str = rest_field(name="sourceUrl") + source_url: str = rest_field(name="sourceUrl", visibility=["read", "create", "update", "delete", "query"]) """URL of the source document. Required.""" - result_url: Optional[str] = rest_field(name="resultUrl") + result_url: Optional[str] = rest_field(name="resultUrl", visibility=["read", "create", "update", "delete", "query"]) """URL of the analyze result JSON.""" - error: Optional["_models.DocumentIntelligenceError"] = rest_field() + error: Optional["_models.DocumentIntelligenceError"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Encountered error.""" @overload @@ -284,10 +315,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzeBatchResult(_model_base.Model): +class AnalyzeBatchResult(_Model): """Batch document analysis result. - :ivar succeeded_count: Number of documents that completed with status succeeded. Required. :vartype succeeded_count: int :ivar failed_count: Number of documents that completed with status failed. Required. 
@@ -298,13 +328,15 @@ class AnalyzeBatchResult(_model_base.Model): :vartype details: list[~azure.ai.documentintelligence.models.AnalyzeBatchOperationDetail] """ - succeeded_count: int = rest_field(name="succeededCount") + succeeded_count: int = rest_field(name="succeededCount", visibility=["read", "create", "update", "delete", "query"]) """Number of documents that completed with status succeeded. Required.""" - failed_count: int = rest_field(name="failedCount") + failed_count: int = rest_field(name="failedCount", visibility=["read", "create", "update", "delete", "query"]) """Number of documents that completed with status failed. Required.""" - skipped_count: int = rest_field(name="skippedCount") + skipped_count: int = rest_field(name="skippedCount", visibility=["read", "create", "update", "delete", "query"]) """Number of documents that completed with status skipped. Required.""" - details: Optional[List["_models.AnalyzeBatchOperationDetail"]] = rest_field() + details: Optional[List["_models.AnalyzeBatchOperationDetail"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation detail for each document in the batch.""" @overload @@ -328,10 +360,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzedDocument(_model_base.Model): +class AnalyzedDocument(_Model): """An object describing the location and semantic content of a document. - :ivar doc_type: Document type. Required. :vartype doc_type: str :ivar bounding_regions: Bounding regions covering the document. @@ -344,15 +375,19 @@ class AnalyzedDocument(_model_base.Model): :vartype confidence: float """ - doc_type: str = rest_field(name="docType") + doc_type: str = rest_field(name="docType", visibility=["read", "create", "update", "delete", "query"]) """Document type. 
Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the document.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the document in the reading order concatenated content. Required.""" - fields: Optional[Dict[str, "_models.DocumentField"]] = rest_field() + fields: Optional[Dict[str, "_models.DocumentField"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Dictionary of named field values.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the document. Required.""" @overload @@ -377,7 +412,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzeDocumentRequest(_model_base.Model): +class AnalyzeDocumentRequest(_Model): """Document analysis parameters. :ivar url_source: Document URL to analyze. Either urlSource or base64Source must be specified. @@ -388,9 +423,11 @@ class AnalyzeDocumentRequest(_model_base.Model): :vartype bytes_source: bytes """ - url_source: Optional[str] = rest_field(name="urlSource") + url_source: Optional[str] = rest_field(name="urlSource", visibility=["read", "create", "update", "delete", "query"]) """Document URL to analyze. Either urlSource or base64Source must be specified.""" - bytes_source: Optional[bytes] = rest_field(name="base64Source", format="base64") + bytes_source: Optional[bytes] = rest_field( + name="base64Source", visibility=["read", "create", "update", "delete", "query"], format="base64" + ) """Base64 encoding of the document to analyze. 
Either urlSource or base64Source must be specified.""" @@ -413,10 +450,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AnalyzeResult(_model_base.Model): +class AnalyzeResult(_Model): """Document analysis result. - :ivar api_version: API version used to produce this result. Required. :vartype api_version: str :ivar model_id: Document model ID used to produce this result. Required. @@ -452,37 +488,59 @@ class AnalyzeResult(_model_base.Model): :vartype warnings: list[~azure.ai.documentintelligence.models.DocumentIntelligenceWarning] """ - api_version: str = rest_field(name="apiVersion") + api_version: str = rest_field(name="apiVersion", visibility=["read", "create", "update", "delete", "query"]) """API version used to produce this result. Required.""" - model_id: str = rest_field(name="modelId") + model_id: str = rest_field(name="modelId", visibility=["read", "create", "update", "delete", "query"]) """Document model ID used to produce this result. Required.""" - string_index_type: Union[str, "_models.StringIndexType"] = rest_field(name="stringIndexType") + string_index_type: Union[str, "_models.StringIndexType"] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) """Method used to compute string offset and length. Required. Known values are: \"textElements\", \"unicodeCodePoint\", and \"utf16CodeUnit\".""" - content_format: Optional[Union[str, "_models.DocumentContentFormat"]] = rest_field(name="contentFormat") + content_format: Optional[Union[str, "_models.DocumentContentFormat"]] = rest_field( + name="contentFormat", visibility=["read", "create", "update", "delete", "query"] + ) """Format of the analyze result top-level content. 
Known values are: \"text\" and \"markdown\".""" - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Concatenate string representation of all textual and visual elements in reading order. Required.""" - pages: List["_models.DocumentPage"] = rest_field() + pages: List["_models.DocumentPage"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Analyzed pages. Required.""" - paragraphs: Optional[List["_models.DocumentParagraph"]] = rest_field() + paragraphs: Optional[List["_models.DocumentParagraph"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted paragraphs.""" - tables: Optional[List["_models.DocumentTable"]] = rest_field() + tables: Optional[List["_models.DocumentTable"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted tables.""" - figures: Optional[List["_models.DocumentFigure"]] = rest_field() + figures: Optional[List["_models.DocumentFigure"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted figures.""" - sections: Optional[List["_models.DocumentSection"]] = rest_field() + sections: Optional[List["_models.DocumentSection"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted sections.""" - key_value_pairs: Optional[List["_models.DocumentKeyValuePair"]] = rest_field(name="keyValuePairs") + key_value_pairs: Optional[List["_models.DocumentKeyValuePair"]] = rest_field( + name="keyValuePairs", visibility=["read", "create", "update", "delete", "query"] + ) """Extracted key-value pairs.""" - styles: Optional[List["_models.DocumentStyle"]] = rest_field() + styles: Optional[List["_models.DocumentStyle"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted font styles.""" - languages: Optional[List["_models.DocumentLanguage"]] = rest_field() + languages: 
Optional[List["_models.DocumentLanguage"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Detected languages.""" - documents: Optional[List["_models.AnalyzedDocument"]] = rest_field() + documents: Optional[List["_models.AnalyzedDocument"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted documents.""" - warnings: Optional[List["_models.DocumentIntelligenceWarning"]] = rest_field() + warnings: Optional[List["_models.DocumentIntelligenceWarning"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """List of warnings encountered.""" @overload @@ -517,11 +575,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AuthorizeClassifierCopyRequest(_model_base.Model): +class AuthorizeClassifierCopyRequest(_Model): """Request body to authorize document classifier copy. - All required parameters must be populated in order to send to server. - :ivar classifier_id: Unique document classifier name. Required. :vartype classifier_id: str :ivar description: Document classifier description. @@ -530,11 +586,11 @@ class AuthorizeClassifierCopyRequest(_model_base.Model): :vartype tags: dict[str, str] """ - classifier_id: str = rest_field(name="classifierId") + classifier_id: str = rest_field(name="classifierId", visibility=["read", "create", "update", "delete", "query"]) """Unique document classifier name. 
Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document classifier description.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of key-value tag attributes associated with the document classifier.""" @overload @@ -557,11 +613,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AuthorizeCopyRequest(_model_base.Model): +class AuthorizeCopyRequest(_Model): """Request body to authorize document model copy. - All required parameters must be populated in order to send to server. - :ivar model_id: Unique document model name. Required. :vartype model_id: str :ivar description: Document model description. @@ -570,11 +624,11 @@ class AuthorizeCopyRequest(_model_base.Model): :vartype tags: dict[str, str] """ - model_id: str = rest_field(name="modelId") + model_id: str = rest_field(name="modelId", visibility=["read", "create", "update", "delete", "query"]) """Unique document model name. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document model description.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of key-value tag attributes associated with the document model.""" @overload @@ -597,19 +651,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureBlobContentSource(_model_base.Model): +class AzureBlobContentSource(_Model): """Azure Blob Storage content. - :ivar container_url: Azure Blob Storage container URL. Required. :vartype container_url: str :ivar prefix: Blob name prefix. 
:vartype prefix: str """ - container_url: str = rest_field(name="containerUrl") + container_url: str = rest_field(name="containerUrl", visibility=["read", "create", "update", "delete", "query"]) """Azure Blob Storage container URL. Required.""" - prefix: Optional[str] = rest_field() + prefix: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Blob name prefix.""" @overload @@ -631,10 +684,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureBlobFileListContentSource(_model_base.Model): +class AzureBlobFileListContentSource(_Model): """File list in Azure Blob Storage. - :ivar container_url: Azure Blob Storage container URL. Required. :vartype container_url: str :ivar file_list: Path to a JSONL file within the container specifying a subset of documents. @@ -642,9 +694,9 @@ class AzureBlobFileListContentSource(_model_base.Model): :vartype file_list: str """ - container_url: str = rest_field(name="containerUrl") + container_url: str = rest_field(name="containerUrl", visibility=["read", "create", "update", "delete", "query"]) """Azure Blob Storage container URL. Required.""" - file_list: str = rest_field(name="fileList") + file_list: str = rest_field(name="fileList", visibility=["read", "create", "update", "delete", "query"]) """Path to a JSONL file within the container specifying a subset of documents. Required.""" @overload @@ -666,10 +718,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BoundingRegion(_model_base.Model): +class BoundingRegion(_Model): """Bounding polygon on a specific page of the input. - :ivar page_number: 1-based page number of page containing the bounding region. Required. :vartype page_number: int :ivar polygon: Bounding polygon on the page, or the entire page if not specified. 
@@ -679,9 +730,9 @@ class BoundingRegion(_model_base.Model): :vartype polygon: list[float] """ - page_number: int = rest_field(name="pageNumber") + page_number: int = rest_field(name="pageNumber", visibility=["read", "create", "update", "delete", "query"]) """1-based page number of page containing the bounding region. Required.""" - polygon: List[float] = rest_field() + polygon: List[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Bounding polygon on the page, or the entire page if not specified. Coordinates specified relative to the top-left of the page. The numbers represent the x, y values of the polygon vertices, clockwise from the left @@ -706,11 +757,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BuildDocumentClassifierRequest(_model_base.Model): +class BuildDocumentClassifierRequest(_Model): """Request body to build a new custom document classifier. - All required parameters must be populated in order to send to server. - :ivar classifier_id: Unique document classifier name. Required. :vartype classifier_id: str :ivar description: Document classifier description. @@ -724,15 +773,21 @@ class BuildDocumentClassifierRequest(_model_base.Model): :vartype allow_overwrite: bool """ - classifier_id: str = rest_field(name="classifierId") + classifier_id: str = rest_field(name="classifierId", visibility=["read", "create", "update", "delete", "query"]) """Unique document classifier name. 
Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document classifier description.""" - base_classifier_id: Optional[str] = rest_field(name="baseClassifierId") + base_classifier_id: Optional[str] = rest_field( + name="baseClassifierId", visibility=["read", "create", "update", "delete", "query"] + ) """Base classifierId on top of which to train the classifier.""" - doc_types: Dict[str, "_models.ClassifierDocumentTypeDetails"] = rest_field(name="docTypes") + doc_types: Dict[str, "_models.ClassifierDocumentTypeDetails"] = rest_field( + name="docTypes", visibility=["read", "create", "update", "delete", "query"] + ) """List of document types to classify against. Required.""" - allow_overwrite: Optional[bool] = rest_field(name="allowOverwrite") + allow_overwrite: Optional[bool] = rest_field( + name="allowOverwrite", visibility=["read", "create", "update", "delete", "query"] + ) """Allow overwriting an existing classifier with the same name.""" @overload @@ -757,11 +812,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BuildDocumentModelRequest(_model_base.Model): +class BuildDocumentModelRequest(_Model): """Request body to build a new custom document model. - All required parameters must be populated in order to send to server. - :ivar model_id: Unique document model name. Required. :vartype model_id: str :ivar description: Document model description. @@ -786,25 +839,33 @@ class BuildDocumentModelRequest(_model_base.Model): :vartype allow_overwrite: bool """ - model_id: str = rest_field(name="modelId") + model_id: str = rest_field(name="modelId", visibility=["read", "create", "update", "delete", "query"]) """Unique document model name. 
Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document model description.""" - build_mode: Union[str, "_models.DocumentBuildMode"] = rest_field(name="buildMode") + build_mode: Union[str, "_models.DocumentBuildMode"] = rest_field( + name="buildMode", visibility=["read", "create", "update", "delete", "query"] + ) """Custom document model build mode. Required. Known values are: \"template\" and \"neural\".""" - azure_blob_source: Optional["_models.AzureBlobContentSource"] = rest_field(name="azureBlobSource") + azure_blob_source: Optional["_models.AzureBlobContentSource"] = rest_field( + name="azureBlobSource", visibility=["read", "create", "update", "delete", "query"] + ) """Azure Blob Storage location containing the training data. Either azureBlobSource or azureBlobFileListSource must be specified.""" azure_blob_file_list_source: Optional["_models.AzureBlobFileListContentSource"] = rest_field( - name="azureBlobFileListSource" + name="azureBlobFileListSource", visibility=["read", "create", "update", "delete", "query"] ) """Azure Blob Storage file list specifying the training data. Either azureBlobSource or azureBlobFileListSource must be specified.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of key-value tag attributes associated with the document model.""" - max_training_hours: Optional[float] = rest_field(name="maxTrainingHours") + max_training_hours: Optional[float] = rest_field( + name="maxTrainingHours", visibility=["read", "create", "update", "delete", "query"] + ) """Max number of V100-equivalent GPU hours to use for model training. 
Default=0.5.""" - allow_overwrite: Optional[bool] = rest_field(name="allowOverwrite") + allow_overwrite: Optional[bool] = rest_field( + name="allowOverwrite", visibility=["read", "create", "update", "delete", "query"] + ) """Allow overwriting an existing model with the same name.""" @overload @@ -832,11 +893,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ClassifierCopyAuthorization(_model_base.Model): +class ClassifierCopyAuthorization(_Model): """Authorization to copy a document classifier to the specified target resource and classifierId. - :ivar target_resource_id: ID of the target Azure resource where the document classifier should be copied to. Required. :vartype target_resource_id: str @@ -855,18 +915,28 @@ class ClassifierCopyAuthorization(_model_base.Model): :vartype expiration_date_time: ~datetime.datetime """ - target_resource_id: str = rest_field(name="targetResourceId") + target_resource_id: str = rest_field( + name="targetResourceId", visibility=["read", "create", "update", "delete", "query"] + ) """ID of the target Azure resource where the document classifier should be copied to. Required.""" - target_resource_region: str = rest_field(name="targetResourceRegion") + target_resource_region: str = rest_field( + name="targetResourceRegion", visibility=["read", "create", "update", "delete", "query"] + ) """Location of the target Azure resource where the document classifier should be copied to. Required.""" - target_classifier_id: str = rest_field(name="targetClassifierId") + target_classifier_id: str = rest_field( + name="targetClassifierId", visibility=["read", "create", "update", "delete", "query"] + ) """Identifier of the target document classifier. 
Required.""" - target_classifier_location: str = rest_field(name="targetClassifierLocation") + target_classifier_location: str = rest_field( + name="targetClassifierLocation", visibility=["read", "create", "update", "delete", "query"] + ) """URL of the copied document classifier in the target account. Required.""" - access_token: str = rest_field(name="accessToken") + access_token: str = rest_field(name="accessToken", visibility=["read", "create", "update", "delete", "query"]) """Token used to authorize the request. Required.""" - expiration_date_time: datetime.datetime = rest_field(name="expirationDateTime", format="rfc3339") + expiration_date_time: datetime.datetime = rest_field( + name="expirationDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date/time when the access token expires. Required.""" @overload @@ -892,7 +962,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ClassifierDocumentTypeDetails(_model_base.Model): +class ClassifierDocumentTypeDetails(_Model): """Classifier document type info. :ivar source_kind: Type of training data source. Known values are: "url", "base64", @@ -911,15 +981,19 @@ class ClassifierDocumentTypeDetails(_model_base.Model): ~azure.ai.documentintelligence.models.AzureBlobFileListContentSource """ - source_kind: Optional[Union[str, "_models.ContentSourceKind"]] = rest_field(name="sourceKind") + source_kind: Optional[Union[str, "_models.ContentSourceKind"]] = rest_field( + name="sourceKind", visibility=["read", "create", "update", "delete", "query"] + ) """Type of training data source. 
Known values are: \"url\", \"base64\", \"azureBlob\", and \"azureBlobFileList\".""" - azure_blob_source: Optional["_models.AzureBlobContentSource"] = rest_field(name="azureBlobSource") + azure_blob_source: Optional["_models.AzureBlobContentSource"] = rest_field( + name="azureBlobSource", visibility=["read", "create", "update", "delete", "query"] + ) """Azure Blob Storage location containing the training data for a classifier document type. Either azureBlobSource or azureBlobFileListSource must be specified.""" azure_blob_file_list_source: Optional["_models.AzureBlobFileListContentSource"] = rest_field( - name="azureBlobFileListSource" + name="azureBlobFileListSource", visibility=["read", "create", "update", "delete", "query"] ) """Azure Blob Storage file list specifying the training data for a classifier document type. Either azureBlobSource or azureBlobFileListSource must be @@ -945,7 +1019,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ClassifyDocumentRequest(_model_base.Model): +class ClassifyDocumentRequest(_Model): """Document classification parameters. :ivar url_source: Document URL to classify. Either urlSource or base64Source must be @@ -957,9 +1031,11 @@ class ClassifyDocumentRequest(_model_base.Model): :vartype bytes_source: bytes """ - url_source: Optional[str] = rest_field(name="urlSource") + url_source: Optional[str] = rest_field(name="urlSource", visibility=["read", "create", "update", "delete", "query"]) """Document URL to classify. Either urlSource or base64Source must be specified.""" - bytes_source: Optional[bytes] = rest_field(name="base64Source", format="base64") + bytes_source: Optional[bytes] = rest_field( + name="base64Source", visibility=["read", "create", "update", "delete", "query"], format="base64" + ) """Base64 encoding of the document to classify. 
Either urlSource or base64Source must be specified.""" @@ -982,11 +1058,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ComposeDocumentModelRequest(_model_base.Model): +class ComposeDocumentModelRequest(_Model): """Request body to create a composed document model from component document models. - All required parameters must be populated in order to send to server. - :ivar model_id: Unique document model name. Required. :vartype model_id: str :ivar description: Document model description. @@ -1002,17 +1076,21 @@ class ComposeDocumentModelRequest(_model_base.Model): :vartype tags: dict[str, str] """ - model_id: str = rest_field(name="modelId") + model_id: str = rest_field(name="modelId", visibility=["read", "create", "update", "delete", "query"]) """Unique document model name. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document model description.""" - classifier_id: str = rest_field(name="classifierId") + classifier_id: str = rest_field(name="classifierId", visibility=["read", "create", "update", "delete", "query"]) """Custom classifier to split and classify the input file. Required.""" - split: Optional[Union[str, "_models.SplitMode"]] = rest_field() + split: Optional[Union[str, "_models.SplitMode"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """File splitting behavior. Known values are: \"auto\", \"none\", and \"perPage\".""" - doc_types: Dict[str, "_models.DocumentTypeDetails"] = rest_field(name="docTypes") + doc_types: Dict[str, "_models.DocumentTypeDetails"] = rest_field( + name="docTypes", visibility=["read", "create", "update", "delete", "query"] + ) """Dictionary mapping supported docTypes to the corresponding document models. 
Required.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of key-value tag attributes associated with the document model.""" @overload @@ -1038,10 +1116,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class CurrencyValue(_model_base.Model): +class CurrencyValue(_Model): """Currency field value. - :ivar amount: Currency amount. Required. :vartype amount: float :ivar currency_symbol: Currency symbol label, if any. @@ -1050,11 +1127,15 @@ class CurrencyValue(_model_base.Model): :vartype currency_code: str """ - amount: float = rest_field() + amount: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Currency amount. Required.""" - currency_symbol: Optional[str] = rest_field(name="currencySymbol") + currency_symbol: Optional[str] = rest_field( + name="currencySymbol", visibility=["read", "create", "update", "delete", "query"] + ) """Currency symbol label, if any.""" - currency_code: Optional[str] = rest_field(name="currencyCode") + currency_code: Optional[str] = rest_field( + name="currencyCode", visibility=["read", "create", "update", "delete", "query"] + ) """Resolved currency code (ISO 4217), if any.""" @overload @@ -1077,10 +1158,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class CustomDocumentModelsDetails(_model_base.Model): +class CustomDocumentModelsDetails(_Model): """Details regarding custom document models. - :ivar count: Number of custom document models in the current resource. Required. :vartype count: int :ivar limit: Maximum number of custom document models supported in the current resource. 
@@ -1088,9 +1168,9 @@ class CustomDocumentModelsDetails(_model_base.Model): :vartype limit: int """ - count: int = rest_field() + count: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Number of custom document models in the current resource. Required.""" - limit: int = rest_field() + limit: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Maximum number of custom document models supported in the current resource. Required.""" @overload @@ -1112,10 +1192,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentBarcode(_model_base.Model): +class DocumentBarcode(_Model): """A barcode object. - :ivar kind: Barcode kind. Required. Known values are: "QRCode", "PDF417", "UPCA", "UPCE", "Code39", "Code128", "EAN8", "EAN13", "DataBar", "Code93", "Codabar", "DataBarExpanded", "ITF", "MicroQRCode", "Aztec", "DataMatrix", and "MaxiCode". @@ -1133,20 +1212,22 @@ class DocumentBarcode(_model_base.Model): :vartype confidence: float """ - kind: Union[str, "_models.DocumentBarcodeKind"] = rest_field() + kind: Union[str, "_models.DocumentBarcodeKind"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Barcode kind. Required. Known values are: \"QRCode\", \"PDF417\", \"UPCA\", \"UPCE\", \"Code39\", \"Code128\", \"EAN8\", \"EAN13\", \"DataBar\", \"Code93\", \"Codabar\", \"DataBarExpanded\", \"ITF\", \"MicroQRCode\", \"Aztec\", \"DataMatrix\", and \"MaxiCode\".""" - value: str = rest_field() + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Barcode value. Required.""" - polygon: Optional[List[float]] = rest_field() + polygon: Optional[List[float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Bounding polygon of the barcode, with coordinates specified relative to the top-left of the page. 
The numbers represent the x, y values of the polygon vertices, clockwise from the left (-180 degrees inclusive) relative to the element orientation.""" - span: "_models.DocumentSpan" = rest_field() + span: "_models.DocumentSpan" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the barcode in the reading order concatenated content. Required.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the barcode. Required.""" @overload @@ -1171,10 +1252,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentCaption(_model_base.Model): +class DocumentCaption(_Model): """A caption object describing a table or figure. - :ivar content: Content of the caption. Required. :vartype content: str :ivar bounding_regions: Bounding regions covering the caption. @@ -1185,13 +1265,15 @@ class DocumentCaption(_model_base.Model): :vartype elements: list[str] """ - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Content of the caption. Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the caption.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the caption in the reading order concatenated content. 
Required.""" - elements: Optional[List[str]] = rest_field() + elements: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Child elements of the caption.""" @overload @@ -1215,7 +1297,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentIntelligenceOperationDetails(_model_base.Model): +class DocumentIntelligenceOperationDetails(_Model): """Operation info. You probably want to use the sub-classes and not this class directly. Known sub-classes are: @@ -1223,7 +1305,6 @@ class DocumentIntelligenceOperationDetails(_model_base.Model): DocumentModelBuildOperationDetails, DocumentModelComposeOperationDetails, DocumentModelCopyToOperationDetails - :ivar operation_id: Operation ID. Required. :vartype operation_id: str :ivar status: Operation status. notStarted, running, completed, or failed. Required. Known @@ -1250,29 +1331,43 @@ class DocumentIntelligenceOperationDetails(_model_base.Model): :vartype error: ~azure.ai.documentintelligence.models.DocumentIntelligenceError """ - __mapping__: Dict[str, _model_base.Model] = {} + __mapping__: Dict[str, _Model] = {} operation_id: str = rest_field(name="operationId", visibility=["read", "create"]) """Operation ID. Required.""" - status: Union[str, "_models.DocumentIntelligenceOperationStatus"] = rest_field() + status: Union[str, "_models.DocumentIntelligenceOperationStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation status. notStarted, running, completed, or failed. Required. 
Known values are: \"notStarted\", \"running\", \"failed\", \"succeeded\", \"canceled\", and \"skipped\".""" - percent_completed: Optional[int] = rest_field(name="percentCompleted") + percent_completed: Optional[int] = rest_field( + name="percentCompleted", visibility=["read", "create", "update", "delete", "query"] + ) """Operation progress (0-100).""" - created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339") + created_date_time: datetime.datetime = rest_field( + name="createdDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date and time (UTC) when the operation was created. Required.""" - last_updated_date_time: datetime.datetime = rest_field(name="lastUpdatedDateTime", format="rfc3339") + last_updated_date_time: datetime.datetime = rest_field( + name="lastUpdatedDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date and time (UTC) when the status was last updated. Required.""" - kind: str = rest_discriminator(name="kind") + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of operation. Required. Known values are: \"documentModelBuild\", \"documentModelCompose\", \"documentModelCopyTo\", \"documentClassifierCopyTo\", and \"documentClassifierBuild\".""" - resource_location: str = rest_field(name="resourceLocation") + resource_location: str = rest_field( + name="resourceLocation", visibility=["read", "create", "update", "delete", "query"] + ) """URL of the resource targeted by this operation. 
Required.""" - api_version: Optional[str] = rest_field(name="apiVersion") + api_version: Optional[str] = rest_field( + name="apiVersion", visibility=["read", "create", "update", "delete", "query"] + ) """API version used to create this operation.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of key-value tag attributes associated with the document model.""" - error: Optional["_models.DocumentIntelligenceError"] = rest_field() + error: Optional["_models.DocumentIntelligenceError"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Encountered error.""" @overload @@ -1307,7 +1402,6 @@ class DocumentClassifierBuildOperationDetails( ): """Get Operation response object. - :ivar operation_id: Operation ID. Required. :vartype operation_id: str :ivar status: Operation status. notStarted, running, completed, or failed. Required. Known @@ -1334,9 +1428,11 @@ class DocumentClassifierBuildOperationDetails( :vartype kind: str or ~azure.ai.documentintelligence.models.DOCUMENT_CLASSIFIER_BUILD """ - result: Optional["_models.DocumentClassifierDetails"] = rest_field() + result: Optional["_models.DocumentClassifierDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation result upon success.""" - kind: Literal[OperationKind.DOCUMENT_CLASSIFIER_BUILD] = rest_discriminator(name="kind") # type: ignore + kind: Literal[OperationKind.DOCUMENT_CLASSIFIER_BUILD] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Type of operation. Required. Build a new custom classifier model.""" @overload @@ -1371,7 +1467,6 @@ class DocumentClassifierCopyToOperationDetails( ): """Get Operation response object. - :ivar operation_id: Operation ID. Required. :vartype operation_id: str :ivar status: Operation status. notStarted, running, completed, or failed. 
Required. Known @@ -1400,9 +1495,11 @@ class DocumentClassifierCopyToOperationDetails( :vartype kind: str or ~azure.ai.documentintelligence.models.DOCUMENT_CLASSIFIER_COPY_TO """ - result: Optional["_models.DocumentClassifierDetails"] = rest_field() + result: Optional["_models.DocumentClassifierDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation result upon success.""" - kind: Literal[OperationKind.DOCUMENT_CLASSIFIER_COPY_TO] = rest_discriminator(name="kind") # type: ignore + kind: Literal[OperationKind.DOCUMENT_CLASSIFIER_COPY_TO] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Type of operation. Required. Copy an existing document classifier to potentially a different resource, region, or subscription.""" @@ -1434,12 +1531,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, kind=OperationKind.DOCUMENT_CLASSIFIER_COPY_TO, **kwargs) -class DocumentClassifierDetails(_model_base.Model): +class DocumentClassifierDetails(_Model): """Document classifier info. - Readonly variables are only populated by the server, and will be ignored when sending a request. - - :ivar classifier_id: Unique document classifier name. Required. :vartype classifier_id: str :ivar description: Document classifier description. @@ -1464,23 +1558,33 @@ class DocumentClassifierDetails(_model_base.Model): classifier_id: str = rest_field(name="classifierId", visibility=["read", "create"]) """Unique document classifier name. 
Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document classifier description.""" - created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339") + created_date_time: datetime.datetime = rest_field( + name="createdDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date and time (UTC) when the document classifier was created. Required.""" - expiration_date_time: Optional[datetime.datetime] = rest_field(name="expirationDateTime", format="rfc3339") + expiration_date_time: Optional[datetime.datetime] = rest_field( + name="expirationDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date and time (UTC) when the document classifier will expire.""" modified_date_time: Optional[datetime.datetime] = rest_field( name="modifiedDateTime", visibility=["read"], format="rfc3339" ) """Date and time (UTC) when the document model was last modified.""" - api_version: str = rest_field(name="apiVersion") + api_version: str = rest_field(name="apiVersion", visibility=["read", "create", "update", "delete", "query"]) """API version used to create this document classifier. Required.""" - base_classifier_id: Optional[str] = rest_field(name="baseClassifierId") + base_classifier_id: Optional[str] = rest_field( + name="baseClassifierId", visibility=["read", "create", "update", "delete", "query"] + ) """Base classifierId on top of which the classifier was trained.""" - doc_types: Dict[str, "_models.ClassifierDocumentTypeDetails"] = rest_field(name="docTypes") + doc_types: Dict[str, "_models.ClassifierDocumentTypeDetails"] = rest_field( + name="docTypes", visibility=["read", "create", "update", "delete", "query"] + ) """List of document types to classify against. 
Required.""" - warnings: Optional[List["_models.DocumentIntelligenceWarning"]] = rest_field() + warnings: Optional[List["_models.DocumentIntelligenceWarning"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """List of warnings encountered while building the classifier.""" @overload @@ -1508,10 +1612,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentField(_model_base.Model): +class DocumentField(_Model): """An object representing the content and location of a field value. - :ivar type: Data type of the field value. Required. Known values are: "string", "date", "time", "phoneNumber", "number", "integer", "selectionMark", "countryRegion", "signature", "array", "object", "currency", "address", "boolean", and "selectionGroup". @@ -1558,49 +1661,83 @@ class DocumentField(_model_base.Model): :vartype confidence: float """ - type: Union[str, "_models.DocumentFieldType"] = rest_field() + type: Union[str, "_models.DocumentFieldType"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Data type of the field value. Required. 
Known values are: \"string\", \"date\", \"time\", \"phoneNumber\", \"number\", \"integer\", \"selectionMark\", \"countryRegion\", \"signature\", \"array\", \"object\", \"currency\", \"address\", \"boolean\", and \"selectionGroup\".""" - value_string: Optional[str] = rest_field(name="valueString") + value_string: Optional[str] = rest_field( + name="valueString", visibility=["read", "create", "update", "delete", "query"] + ) """String value.""" - value_date: Optional[datetime.date] = rest_field(name="valueDate") + value_date: Optional[datetime.date] = rest_field( + name="valueDate", visibility=["read", "create", "update", "delete", "query"] + ) """Date value in YYYY-MM-DD format (ISO 8601).""" - value_time: Optional[datetime.time] = rest_field(name="valueTime") + value_time: Optional[datetime.time] = rest_field( + name="valueTime", visibility=["read", "create", "update", "delete", "query"] + ) """Time value in hh:mm:ss format (ISO 8601).""" - value_phone_number: Optional[str] = rest_field(name="valuePhoneNumber") + value_phone_number: Optional[str] = rest_field( + name="valuePhoneNumber", visibility=["read", "create", "update", "delete", "query"] + ) """Phone number value in E.164 format (ex. +19876543210).""" - value_number: Optional[float] = rest_field(name="valueNumber") + value_number: Optional[float] = rest_field( + name="valueNumber", visibility=["read", "create", "update", "delete", "query"] + ) """Floating point value.""" - value_integer: Optional[int] = rest_field(name="valueInteger") + value_integer: Optional[int] = rest_field( + name="valueInteger", visibility=["read", "create", "update", "delete", "query"] + ) """Integer value.""" value_selection_mark: Optional[Union[str, "_models.DocumentSelectionMarkState"]] = rest_field( - name="valueSelectionMark" + name="valueSelectionMark", visibility=["read", "create", "update", "delete", "query"] ) """Selection mark value. 
Known values are: \"selected\" and \"unselected\".""" - value_signature: Optional[Union[str, "_models.DocumentSignatureType"]] = rest_field(name="valueSignature") + value_signature: Optional[Union[str, "_models.DocumentSignatureType"]] = rest_field( + name="valueSignature", visibility=["read", "create", "update", "delete", "query"] + ) """Presence of signature. Known values are: \"signed\" and \"unsigned\".""" - value_country_region: Optional[str] = rest_field(name="valueCountryRegion") + value_country_region: Optional[str] = rest_field( + name="valueCountryRegion", visibility=["read", "create", "update", "delete", "query"] + ) """3-letter country code value (ISO 3166-1 alpha-3).""" - value_array: Optional[List["_models.DocumentField"]] = rest_field(name="valueArray") + value_array: Optional[List["_models.DocumentField"]] = rest_field( + name="valueArray", visibility=["read", "create", "update", "delete", "query"] + ) """Array of field values.""" - value_object: Optional[Dict[str, "_models.DocumentField"]] = rest_field(name="valueObject") + value_object: Optional[Dict[str, "_models.DocumentField"]] = rest_field( + name="valueObject", visibility=["read", "create", "update", "delete", "query"] + ) """Dictionary of named field values.""" - value_currency: Optional["_models.CurrencyValue"] = rest_field(name="valueCurrency") + value_currency: Optional["_models.CurrencyValue"] = rest_field( + name="valueCurrency", visibility=["read", "create", "update", "delete", "query"] + ) """Currency value.""" - value_address: Optional["_models.AddressValue"] = rest_field(name="valueAddress") + value_address: Optional["_models.AddressValue"] = rest_field( + name="valueAddress", visibility=["read", "create", "update", "delete", "query"] + ) """Address value.""" - value_boolean: Optional[bool] = rest_field(name="valueBoolean") + value_boolean: Optional[bool] = rest_field( + name="valueBoolean", visibility=["read", "create", "update", "delete", "query"] + ) """Boolean value.""" - 
value_selection_group: Optional[List[str]] = rest_field(name="valueSelectionGroup") + value_selection_group: Optional[List[str]] = rest_field( + name="valueSelectionGroup", visibility=["read", "create", "update", "delete", "query"] + ) """Selection group value.""" - content: Optional[str] = rest_field() + content: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Field content.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the field.""" - spans: Optional[List["_models.DocumentSpan"]] = rest_field() + spans: Optional[List["_models.DocumentSpan"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Location of the field in the reading order concatenated content.""" - confidence: Optional[float] = rest_field() + confidence: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the field.""" @overload @@ -1640,10 +1777,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentFieldSchema(_model_base.Model): +class DocumentFieldSchema(_Model): """Description of the field semantic schema using a JSON Schema style syntax. - :ivar type: Semantic data type of the field value. Required. Known values are: "string", "date", "time", "phoneNumber", "number", "integer", "selectionMark", "countryRegion", "signature", "array", "object", "currency", "address", "boolean", and "selectionGroup". 
@@ -1658,18 +1794,24 @@ class DocumentFieldSchema(_model_base.Model): :vartype properties: dict[str, ~azure.ai.documentintelligence.models.DocumentFieldSchema] """ - type: Union[str, "_models.DocumentFieldType"] = rest_field() + type: Union[str, "_models.DocumentFieldType"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Semantic data type of the field value. Required. Known values are: \"string\", \"date\", \"time\", \"phoneNumber\", \"number\", \"integer\", \"selectionMark\", \"countryRegion\", \"signature\", \"array\", \"object\", \"currency\", \"address\", \"boolean\", and \"selectionGroup\".""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Field description.""" - example: Optional[str] = rest_field() + example: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Example field content.""" - items_schema: Optional["_models.DocumentFieldSchema"] = rest_field(name="items") + items_schema: Optional["_models.DocumentFieldSchema"] = rest_field( + name="items", visibility=["read", "create", "update", "delete", "query"] + ) """Field type schema of each array element.""" - properties: Optional[Dict[str, "_models.DocumentFieldSchema"]] = rest_field() + properties: Optional[Dict[str, "_models.DocumentFieldSchema"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Named sub-fields of the object field.""" @overload @@ -1694,10 +1836,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentFigure(_model_base.Model): +class DocumentFigure(_Model): """An object representing a figure in the document. - :ivar bounding_regions: Bounding regions covering the figure. 
:vartype bounding_regions: list[~azure.ai.documentintelligence.models.BoundingRegion] :ivar spans: Location of the figure in the reading order concatenated content. Required. @@ -1712,17 +1853,23 @@ class DocumentFigure(_model_base.Model): :vartype id: str """ - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the figure.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the figure in the reading order concatenated content. Required.""" - elements: Optional[List[str]] = rest_field() + elements: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Child elements of the figure, excluding any caption or footnotes.""" - caption: Optional["_models.DocumentCaption"] = rest_field() + caption: Optional["_models.DocumentCaption"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Caption associated with the figure.""" - footnotes: Optional[List["_models.DocumentFootnote"]] = rest_field() + footnotes: Optional[List["_models.DocumentFootnote"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """List of footnotes associated with the figure.""" - id: Optional[str] = rest_field() + id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Figure ID.""" @overload @@ -1748,10 +1895,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentFootnote(_model_base.Model): +class DocumentFootnote(_Model): """A footnote object describing a table or figure. - :ivar content: Content of the footnote. Required. 
:vartype content: str :ivar bounding_regions: Bounding regions covering the footnote. @@ -1762,13 +1908,15 @@ class DocumentFootnote(_model_base.Model): :vartype elements: list[str] """ - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Content of the footnote. Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the footnote.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the footnote in the reading order concatenated content. Required.""" - elements: Optional[List[str]] = rest_field() + elements: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Child elements of the footnote.""" @overload @@ -1792,10 +1940,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentFormula(_model_base.Model): +class DocumentFormula(_Model): """A formula object. - :ivar kind: Formula kind. Required. Known values are: "inline" and "display". :vartype kind: str or ~azure.ai.documentintelligence.models.DocumentFormulaKind :ivar value: LaTex expression describing the formula. Required. @@ -1811,18 +1958,20 @@ class DocumentFormula(_model_base.Model): :vartype confidence: float """ - kind: Union[str, "_models.DocumentFormulaKind"] = rest_field() + kind: Union[str, "_models.DocumentFormulaKind"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Formula kind. Required. 
Known values are: \"inline\" and \"display\".""" - value: str = rest_field() + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """LaTex expression describing the formula. Required.""" - polygon: Optional[List[float]] = rest_field() + polygon: Optional[List[float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Bounding polygon of the formula, with coordinates specified relative to the top-left of the page. The numbers represent the x, y values of the polygon vertices, clockwise from the left (-180 degrees inclusive) relative to the element orientation.""" - span: "_models.DocumentSpan" = rest_field() + span: "_models.DocumentSpan" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the formula in the reading order concatenated content. Required.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the formula. Required.""" @overload @@ -1847,10 +1996,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentIntelligenceError(_model_base.Model): +class DocumentIntelligenceError(_Model): """The error object. - :ivar code: One of a server-defined set of error codes. Required. :vartype code: str :ivar message: A human-readable representation of the error. Required. @@ -1864,15 +2012,19 @@ class DocumentIntelligenceError(_model_base.Model): :vartype innererror: ~azure.ai.documentintelligence.models.DocumentIntelligenceInnerError """ - code: str = rest_field() + code: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """One of a server-defined set of error codes. Required.""" - message: str = rest_field() + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A human-readable representation of the error. 
Required.""" - target: Optional[str] = rest_field() + target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The target of the error.""" - details: Optional[List["_models.DocumentIntelligenceError"]] = rest_field() + details: Optional[List["_models.DocumentIntelligenceError"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """An array of details about specific errors that led to this reported error.""" - innererror: Optional["_models.DocumentIntelligenceInnerError"] = rest_field() + innererror: Optional["_models.DocumentIntelligenceInnerError"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """An object containing more specific information than the current object about the error.""" @overload @@ -1897,16 +2049,14 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentIntelligenceErrorResponse(_model_base.Model): +class DocumentIntelligenceErrorResponse(_Model): """Error response object. - All required parameters must be populated in order to send to server. - :ivar error: Error info. Required. :vartype error: ~azure.ai.documentintelligence.models.DocumentIntelligenceError """ - error: "_models.DocumentIntelligenceError" = rest_field() + error: "_models.DocumentIntelligenceError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Error info. Required.""" @overload @@ -1927,7 +2077,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentIntelligenceInnerError(_model_base.Model): +class DocumentIntelligenceInnerError(_Model): """An object containing more specific information about the error. :ivar code: One of a server-defined set of error codes. 
@@ -1938,11 +2088,13 @@ class DocumentIntelligenceInnerError(_model_base.Model): :vartype innererror: ~azure.ai.documentintelligence.models.DocumentIntelligenceInnerError """ - code: Optional[str] = rest_field() + code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """One of a server-defined set of error codes.""" - message: Optional[str] = rest_field() + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A human-readable representation of the error.""" - innererror: Optional["_models.DocumentIntelligenceInnerError"] = rest_field() + innererror: Optional["_models.DocumentIntelligenceInnerError"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Inner error.""" @overload @@ -1965,16 +2117,17 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentIntelligenceResourceDetails(_model_base.Model): +class DocumentIntelligenceResourceDetails(_Model): """General information regarding the current resource. - :ivar custom_document_models: Details regarding custom document models. Required. :vartype custom_document_models: ~azure.ai.documentintelligence.models.CustomDocumentModelsDetails """ - custom_document_models: "_models.CustomDocumentModelsDetails" = rest_field(name="customDocumentModels") + custom_document_models: "_models.CustomDocumentModelsDetails" = rest_field( + name="customDocumentModels", visibility=["read", "create", "update", "delete", "query"] + ) """Details regarding custom document models. Required.""" @overload @@ -1995,10 +2148,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentIntelligenceWarning(_model_base.Model): +class DocumentIntelligenceWarning(_Model): """The error object. - :ivar code: One of a server-defined set of warning codes. Required. :vartype code: str :ivar message: A human-readable representation of the warning. 
Required. @@ -2007,11 +2159,11 @@ class DocumentIntelligenceWarning(_model_base.Model): :vartype target: str """ - code: str = rest_field() + code: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """One of a server-defined set of warning codes. Required.""" - message: str = rest_field() + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A human-readable representation of the warning. Required.""" - target: Optional[str] = rest_field() + target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The target of the error.""" @overload @@ -2034,10 +2186,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentKeyValueElement(_model_base.Model): +class DocumentKeyValueElement(_Model): """An object representing the field key or value in a key-value pair. - :ivar content: Concatenated content of the key-value element in reading order. Required. :vartype content: str :ivar bounding_regions: Bounding regions covering the key-value element. @@ -2047,11 +2198,13 @@ class DocumentKeyValueElement(_model_base.Model): :vartype spans: list[~azure.ai.documentintelligence.models.DocumentSpan] """ - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Concatenated content of the key-value element in reading order. 
Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the key-value element.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the key-value element in the reading order concatenated content. Required.""" @overload @@ -2074,11 +2227,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentKeyValuePair(_model_base.Model): +class DocumentKeyValuePair(_Model): """An object representing a form field with distinct field label (key) and field value (may be empty). - :ivar key: Field label of the key-value pair. Required. :vartype key: ~azure.ai.documentintelligence.models.DocumentKeyValueElement :ivar value: Field value of the key-value pair. @@ -2087,11 +2239,13 @@ class DocumentKeyValuePair(_model_base.Model): :vartype confidence: float """ - key: "_models.DocumentKeyValueElement" = rest_field() + key: "_models.DocumentKeyValueElement" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Field label of the key-value pair. Required.""" - value: Optional["_models.DocumentKeyValueElement"] = rest_field() + value: Optional["_models.DocumentKeyValueElement"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Field value of the key-value pair.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the key-value pair. 
Required.""" @overload @@ -2114,10 +2268,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentLanguage(_model_base.Model): +class DocumentLanguage(_Model): """An object representing the detected language for a given text span. - :ivar locale: Detected language. Value may an ISO 639-1 language code (ex. "en", "fr") or BCP 47 language tag (ex. "zh-Hans"). Required. :vartype locale: str @@ -2128,13 +2281,13 @@ class DocumentLanguage(_model_base.Model): :vartype confidence: float """ - locale: str = rest_field() + locale: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Detected language. Value may an ISO 639-1 language code (ex. \"en\", \"fr\") or BCP 47 language tag (ex. \"zh-Hans\"). Required.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the text elements in the concatenated content the language applies to. Required.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly identifying the language. Required.""" @overload @@ -2157,11 +2310,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentLine(_model_base.Model): +class DocumentLine(_Model): """A content line object consisting of an adjacent sequence of content elements, such as words and selection marks. - :ivar content: Concatenated content of the contained elements in reading order. Required. 
:vartype content: str :ivar polygon: Bounding polygon of the line, with coordinates specified relative to the @@ -2173,14 +2325,14 @@ class DocumentLine(_model_base.Model): :vartype spans: list[~azure.ai.documentintelligence.models.DocumentSpan] """ - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Concatenated content of the contained elements in reading order. Required.""" - polygon: Optional[List[float]] = rest_field() + polygon: Optional[List[float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Bounding polygon of the line, with coordinates specified relative to the top-left of the page. The numbers represent the x, y values of the polygon vertices, clockwise from the left (-180 degrees inclusive) relative to the element orientation.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the line in the reading order concatenated content. Required.""" @overload @@ -2206,7 +2358,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class DocumentModelBuildOperationDetails(DocumentIntelligenceOperationDetails, discriminator="documentModelBuild"): """Get Operation response object. - :ivar operation_id: Operation ID. Required. :vartype operation_id: str :ivar status: Operation status. notStarted, running, completed, or failed. Required. 
Known @@ -2233,9 +2384,11 @@ class DocumentModelBuildOperationDetails(DocumentIntelligenceOperationDetails, d :vartype kind: str or ~azure.ai.documentintelligence.models.DOCUMENT_MODEL_BUILD """ - result: Optional["_models.DocumentModelDetails"] = rest_field() + result: Optional["_models.DocumentModelDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation result upon success.""" - kind: Literal[OperationKind.DOCUMENT_MODEL_BUILD] = rest_discriminator(name="kind") # type: ignore + kind: Literal[OperationKind.DOCUMENT_MODEL_BUILD] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Type of operation. Required. Build a new custom document model.""" @overload @@ -2268,7 +2421,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class DocumentModelComposeOperationDetails(DocumentIntelligenceOperationDetails, discriminator="documentModelCompose"): """Get Operation response object. - :ivar operation_id: Operation ID. Required. :vartype operation_id: str :ivar status: Operation status. notStarted, running, completed, or failed. Required. Known @@ -2296,9 +2448,11 @@ class DocumentModelComposeOperationDetails(DocumentIntelligenceOperationDetails, :vartype kind: str or ~azure.ai.documentintelligence.models.DOCUMENT_MODEL_COMPOSE """ - result: Optional["_models.DocumentModelDetails"] = rest_field() + result: Optional["_models.DocumentModelDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation result upon success.""" - kind: Literal[OperationKind.DOCUMENT_MODEL_COMPOSE] = rest_discriminator(name="kind") # type: ignore + kind: Literal[OperationKind.DOCUMENT_MODEL_COMPOSE] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Type of operation. Required. 
Compose a new custom document model from existing models.""" @overload @@ -2331,7 +2485,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class DocumentModelCopyToOperationDetails(DocumentIntelligenceOperationDetails, discriminator="documentModelCopyTo"): """Get Operation response object. - :ivar operation_id: Operation ID. Required. :vartype operation_id: str :ivar status: Operation status. notStarted, running, completed, or failed. Required. Known @@ -2360,9 +2513,11 @@ class DocumentModelCopyToOperationDetails(DocumentIntelligenceOperationDetails, :vartype kind: str or ~azure.ai.documentintelligence.models.DOCUMENT_MODEL_COPY_TO """ - result: Optional["_models.DocumentModelDetails"] = rest_field() + result: Optional["_models.DocumentModelDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Operation result upon success.""" - kind: Literal[OperationKind.DOCUMENT_MODEL_COPY_TO] = rest_discriminator(name="kind") # type: ignore + kind: Literal[OperationKind.DOCUMENT_MODEL_COPY_TO] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Type of operation. Required. Copy an existing document model to potentially a different resource, region, or subscription.""" @@ -2394,12 +2549,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, kind=OperationKind.DOCUMENT_MODEL_COPY_TO, **kwargs) -class DocumentModelDetails(_model_base.Model): +class DocumentModelDetails(_Model): """Document model info. - Readonly variables are only populated by the server, and will be ignored when sending a request. - - :ivar model_id: Unique document model name. Required. :vartype model_id: str :ivar description: Document model description. @@ -2440,7 +2592,7 @@ class DocumentModelDetails(_model_base.Model): model_id: str = rest_field(name="modelId", visibility=["read", "create"]) """Unique document model name. 
Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document model description.""" created_date_time: datetime.datetime = rest_field(name="createdDateTime", visibility=["read"], format="rfc3339") """Date and time (UTC) when the document model was created. Required.""" @@ -2454,7 +2606,7 @@ class DocumentModelDetails(_model_base.Model): """Date and time (UTC) when the document model was last modified.""" api_version: Optional[str] = rest_field(name="apiVersion", visibility=["read"]) """API version used to create this document model.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of key-value tag attributes associated with the document model.""" build_mode: Optional[Union[str, "_models.DocumentBuildMode"]] = rest_field(name="buildMode", visibility=["read"]) """Custom document model build mode. Known values are: \"template\" and \"neural\".""" @@ -2468,9 +2620,13 @@ class DocumentModelDetails(_model_base.Model): ) """Azure Blob Storage file list specifying the training data. Either azureBlobSource or azureBlobFileListSource must be specified.""" - classifier_id: Optional[str] = rest_field(name="classifierId") + classifier_id: Optional[str] = rest_field( + name="classifierId", visibility=["read", "create", "update", "delete", "query"] + ) """For composed models, the custom classifier to split and classify the input file.""" - split: Optional[Union[str, "_models.SplitMode"]] = rest_field() + split: Optional[Union[str, "_models.SplitMode"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """For composed models, the file splitting behavior. 
Known values are: \"auto\", \"none\", and \"perPage\".""" doc_types: Optional[Dict[str, "_models.DocumentTypeDetails"]] = rest_field(name="docTypes", visibility=["read"]) @@ -2502,10 +2658,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentPage(_model_base.Model): +class DocumentPage(_Model): """Content and layout elements extracted from a page from the input. - :ivar page_number: 1-based page number in the input document. Required. :vartype page_number: int :ivar angle: The general orientation of the content in clockwise direction, measured in @@ -2533,30 +2688,42 @@ class DocumentPage(_model_base.Model): :vartype formulas: list[~azure.ai.documentintelligence.models.DocumentFormula] """ - page_number: int = rest_field(name="pageNumber") + page_number: int = rest_field(name="pageNumber", visibility=["read", "create", "update", "delete", "query"]) """1-based page number in the input document. Required.""" - angle: Optional[float] = rest_field() + angle: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The general orientation of the content in clockwise direction, measured in degrees between (-180, 180].""" - width: Optional[float] = rest_field() + width: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The width of the image/PDF in pixels/inches, respectively.""" - height: Optional[float] = rest_field() + height: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The height of the image/PDF in pixels/inches, respectively.""" - unit: Optional[Union[str, "_models.LengthUnit"]] = rest_field() + unit: Optional[Union[str, "_models.LengthUnit"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The unit used by the width, height, and polygon properties. For images, the unit is \"pixel\". For PDF, the unit is \"inch\". 
Known values are: \"pixel\" and \"inch\".""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the page in the reading order concatenated content. Required.""" - words: Optional[List["_models.DocumentWord"]] = rest_field() + words: Optional[List["_models.DocumentWord"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted words from the page.""" - selection_marks: Optional[List["_models.DocumentSelectionMark"]] = rest_field(name="selectionMarks") + selection_marks: Optional[List["_models.DocumentSelectionMark"]] = rest_field( + name="selectionMarks", visibility=["read", "create", "update", "delete", "query"] + ) """Extracted selection marks from the page.""" - lines: Optional[List["_models.DocumentLine"]] = rest_field() + lines: Optional[List["_models.DocumentLine"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted lines from the page, potentially containing both textual and visual elements.""" - barcodes: Optional[List["_models.DocumentBarcode"]] = rest_field() + barcodes: Optional[List["_models.DocumentBarcode"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted barcodes from the page.""" - formulas: Optional[List["_models.DocumentFormula"]] = rest_field() + formulas: Optional[List["_models.DocumentFormula"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Extracted formulas from the page.""" @overload @@ -2587,11 +2754,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentParagraph(_model_base.Model): +class DocumentParagraph(_Model): """A paragraph object consisting with contiguous lines generally with common alignment and spacing. - :ivar role: Semantic role of the paragraph. 
Known values are: "pageHeader", "pageFooter", "pageNumber", "title", "sectionHeading", "footnote", and "formulaBlock". :vartype role: str or ~azure.ai.documentintelligence.models.ParagraphRole @@ -2603,14 +2769,18 @@ class DocumentParagraph(_model_base.Model): :vartype spans: list[~azure.ai.documentintelligence.models.DocumentSpan] """ - role: Optional[Union[str, "_models.ParagraphRole"]] = rest_field() + role: Optional[Union[str, "_models.ParagraphRole"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Semantic role of the paragraph. Known values are: \"pageHeader\", \"pageFooter\", \"pageNumber\", \"title\", \"sectionHeading\", \"footnote\", and \"formulaBlock\".""" - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Concatenated content of the paragraph in reading order. Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the paragraph.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the paragraph in the reading order concatenated content. Required.""" @overload @@ -2634,19 +2804,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentSection(_model_base.Model): +class DocumentSection(_Model): """An object representing a section in the document. - :ivar spans: Location of the section in the reading order concatenated content. Required. :vartype spans: list[~azure.ai.documentintelligence.models.DocumentSpan] :ivar elements: Child elements of the section. 
:vartype elements: list[str] """ - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the section in the reading order concatenated content. Required.""" - elements: Optional[List[str]] = rest_field() + elements: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Child elements of the section.""" @overload @@ -2668,11 +2837,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentSelectionMark(_model_base.Model): +class DocumentSelectionMark(_Model): """A selection mark object representing check boxes, radio buttons, and other elements indicating a selection. - :ivar state: State of the selection mark. Required. Known values are: "selected" and "unselected". :vartype state: str or ~azure.ai.documentintelligence.models.DocumentSelectionMarkState @@ -2687,16 +2855,18 @@ class DocumentSelectionMark(_model_base.Model): :vartype confidence: float """ - state: Union[str, "_models.DocumentSelectionMarkState"] = rest_field() + state: Union[str, "_models.DocumentSelectionMarkState"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """State of the selection mark. Required. Known values are: \"selected\" and \"unselected\".""" - polygon: Optional[List[float]] = rest_field() + polygon: Optional[List[float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Bounding polygon of the selection mark, with coordinates specified relative to the top-left of the page. 
The numbers represent the x, y values of the polygon vertices, clockwise from the left (-180 degrees inclusive) relative to the element orientation.""" - span: "_models.DocumentSpan" = rest_field() + span: "_models.DocumentSpan" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the selection mark in the reading order concatenated content. Required.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the selection mark. Required.""" @overload @@ -2720,20 +2890,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentSpan(_model_base.Model): +class DocumentSpan(_Model): """Contiguous region of the concatenated content property, specified as an offset and length. - :ivar offset: Zero-based index of the content represented by the span. Required. :vartype offset: int :ivar length: Number of characters in the content represented by the span. Required. :vartype length: int """ - offset: int = rest_field() + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Zero-based index of the content represented by the span. Required.""" - length: int = rest_field() + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Number of characters in the content represented by the span. Required.""" @overload @@ -2755,10 +2924,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentStyle(_model_base.Model): +class DocumentStyle(_Model): """An object representing observed text styles. - :ivar is_handwritten: Is content handwritten?. 
:vartype is_handwritten: bool :ivar similar_font_family: Visually most similar font from among the set of supported font @@ -2780,22 +2948,32 @@ class DocumentStyle(_model_base.Model): :vartype confidence: float """ - is_handwritten: Optional[bool] = rest_field(name="isHandwritten") + is_handwritten: Optional[bool] = rest_field( + name="isHandwritten", visibility=["read", "create", "update", "delete", "query"] + ) """Is content handwritten?.""" - similar_font_family: Optional[str] = rest_field(name="similarFontFamily") + similar_font_family: Optional[str] = rest_field( + name="similarFontFamily", visibility=["read", "create", "update", "delete", "query"] + ) """Visually most similar font from among the set of supported font families, with fallback fonts following CSS convention (ex. 'Arial, sans-serif').""" - font_style: Optional[Union[str, "_models.DocumentFontStyle"]] = rest_field(name="fontStyle") + font_style: Optional[Union[str, "_models.DocumentFontStyle"]] = rest_field( + name="fontStyle", visibility=["read", "create", "update", "delete", "query"] + ) """Font style. Known values are: \"normal\" and \"italic\".""" - font_weight: Optional[Union[str, "_models.DocumentFontWeight"]] = rest_field(name="fontWeight") + font_weight: Optional[Union[str, "_models.DocumentFontWeight"]] = rest_field( + name="fontWeight", visibility=["read", "create", "update", "delete", "query"] + ) """Font weight. 
Known values are: \"normal\" and \"bold\".""" - color: Optional[str] = rest_field() + color: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Foreground color in #rrggbb hexadecimal format.""" - background_color: Optional[str] = rest_field(name="backgroundColor") + background_color: Optional[str] = rest_field( + name="backgroundColor", visibility=["read", "create", "update", "delete", "query"] + ) """Background color in #rrggbb hexadecimal format..""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the text elements in the concatenated content the style applies to. Required.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly identifying the style. Required.""" @overload @@ -2823,10 +3001,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentTable(_model_base.Model): +class DocumentTable(_Model): """A table object consisting table cells arranged in a rectangular layout. - :ivar row_count: Number of rows in the table. Required. :vartype row_count: int :ivar column_count: Number of columns in the table. Required. @@ -2843,19 +3020,25 @@ class DocumentTable(_model_base.Model): :vartype footnotes: list[~azure.ai.documentintelligence.models.DocumentFootnote] """ - row_count: int = rest_field(name="rowCount") + row_count: int = rest_field(name="rowCount", visibility=["read", "create", "update", "delete", "query"]) """Number of rows in the table. Required.""" - column_count: int = rest_field(name="columnCount") + column_count: int = rest_field(name="columnCount", visibility=["read", "create", "update", "delete", "query"]) """Number of columns in the table. 
Required.""" - cells: List["_models.DocumentTableCell"] = rest_field() + cells: List["_models.DocumentTableCell"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Cells contained within the table. Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the table.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the table in the reading order concatenated content. Required.""" - caption: Optional["_models.DocumentCaption"] = rest_field() + caption: Optional["_models.DocumentCaption"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Caption associated with the table.""" - footnotes: Optional[List["_models.DocumentFootnote"]] = rest_field() + footnotes: Optional[List["_models.DocumentFootnote"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """List of footnotes associated with the table.""" @overload @@ -2882,10 +3065,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentTableCell(_model_base.Model): +class DocumentTableCell(_Model): """An object representing the location and content of a table cell. - :ivar kind: Table cell kind. Known values are: "content", "rowHeader", "columnHeader", "stubHead", and "description". 
:vartype kind: str or ~azure.ai.documentintelligence.models.DocumentTableCellKind @@ -2907,24 +3089,30 @@ class DocumentTableCell(_model_base.Model): :vartype elements: list[str] """ - kind: Optional[Union[str, "_models.DocumentTableCellKind"]] = rest_field() + kind: Optional[Union[str, "_models.DocumentTableCellKind"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Table cell kind. Known values are: \"content\", \"rowHeader\", \"columnHeader\", \"stubHead\", and \"description\".""" - row_index: int = rest_field(name="rowIndex") + row_index: int = rest_field(name="rowIndex", visibility=["read", "create", "update", "delete", "query"]) """Row index of the cell. Required.""" - column_index: int = rest_field(name="columnIndex") + column_index: int = rest_field(name="columnIndex", visibility=["read", "create", "update", "delete", "query"]) """Column index of the cell. Required.""" - row_span: Optional[int] = rest_field(name="rowSpan") + row_span: Optional[int] = rest_field(name="rowSpan", visibility=["read", "create", "update", "delete", "query"]) """Number of rows spanned by this cell.""" - column_span: Optional[int] = rest_field(name="columnSpan") + column_span: Optional[int] = rest_field( + name="columnSpan", visibility=["read", "create", "update", "delete", "query"] + ) """Number of columns spanned by this cell.""" - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Concatenated content of the table cell in reading order. 
Required.""" - bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field(name="boundingRegions") + bounding_regions: Optional[List["_models.BoundingRegion"]] = rest_field( + name="boundingRegions", visibility=["read", "create", "update", "delete", "query"] + ) """Bounding regions covering the table cell.""" - spans: List["_models.DocumentSpan"] = rest_field() + spans: List["_models.DocumentSpan"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the table cell in the reading order concatenated content. Required.""" - elements: Optional[List[str]] = rest_field() + elements: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Child elements of the table cell.""" @overload @@ -2953,7 +3141,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentTypeDetails(_model_base.Model): +class DocumentTypeDetails(_Model): """Document type info. :ivar description: Document model description. @@ -2978,23 +3166,37 @@ class DocumentTypeDetails(_model_base.Model): :vartype max_documents_to_analyze: int """ - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Document model description.""" - build_mode: Optional[Union[str, "_models.DocumentBuildMode"]] = rest_field(name="buildMode") + build_mode: Optional[Union[str, "_models.DocumentBuildMode"]] = rest_field( + name="buildMode", visibility=["read", "create", "update", "delete", "query"] + ) """Custom document model build mode. 
Known values are: \"template\" and \"neural\".""" - field_schema: Optional[Dict[str, "_models.DocumentFieldSchema"]] = rest_field(name="fieldSchema") + field_schema: Optional[Dict[str, "_models.DocumentFieldSchema"]] = rest_field( + name="fieldSchema", visibility=["read", "create", "update", "delete", "query"] + ) """Description of the document semantic schema using a JSON Schema style syntax.""" - field_confidence: Optional[Dict[str, float]] = rest_field(name="fieldConfidence") + field_confidence: Optional[Dict[str, float]] = rest_field( + name="fieldConfidence", visibility=["read", "create", "update", "delete", "query"] + ) """Estimated confidence for each field.""" - model_id: Optional[str] = rest_field(name="modelId") + model_id: Optional[str] = rest_field(name="modelId", visibility=["read", "create", "update", "delete", "query"]) """Document model to use for analyzing documents with specified type.""" - confidence_threshold: Optional[float] = rest_field(name="confidenceThreshold") + confidence_threshold: Optional[float] = rest_field( + name="confidenceThreshold", visibility=["read", "create", "update", "delete", "query"] + ) """Only perform analysis if docType confidence is above threshold.""" - features: Optional[List[Union[str, "_models.DocumentAnalysisFeature"]]] = rest_field() + features: Optional[List[Union[str, "_models.DocumentAnalysisFeature"]]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """List of optional analysis features.""" - query_fields: Optional[List[str]] = rest_field(name="queryFields") + query_fields: Optional[List[str]] = rest_field( + name="queryFields", visibility=["read", "create", "update", "delete", "query"] + ) """List of additional fields to extract. Ex. 
\"NumberOfGuests,StoreNumber\".""" - max_documents_to_analyze: Optional[int] = rest_field(name="maxDocumentsToAnalyze") + max_documents_to_analyze: Optional[int] = rest_field( + name="maxDocumentsToAnalyze", visibility=["read", "create", "update", "delete", "query"] + ) """Maximum number of documents of specified type to analyze. Default=all.""" @overload @@ -3023,12 +3225,11 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DocumentWord(_model_base.Model): +class DocumentWord(_Model): """A word object consisting of a contiguous sequence of characters. For non-space delimited languages, such as Chinese, Japanese, and Korean, each character is represented as its own word. - :ivar content: Text content of the word. Required. :vartype content: str :ivar polygon: Bounding polygon of the word, with coordinates specified relative to the @@ -3042,16 +3243,16 @@ class DocumentWord(_model_base.Model): :vartype confidence: float """ - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Text content of the word. Required.""" - polygon: Optional[List[float]] = rest_field() + polygon: Optional[List[float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Bounding polygon of the word, with coordinates specified relative to the top-left of the page. The numbers represent the x, y values of the polygon vertices, clockwise from the left (-180 degrees inclusive) relative to the element orientation.""" - span: "_models.DocumentSpan" = rest_field() + span: "_models.DocumentSpan" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Location of the word in the reading order concatenated content. Required.""" - confidence: float = rest_field() + confidence: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Confidence of correctly extracting the word. 
Required.""" @overload @@ -3075,11 +3276,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ModelCopyAuthorization(_model_base.Model): +class ModelCopyAuthorization(_Model): """Authorization to copy a document model to the specified target resource and modelId. - :ivar target_resource_id: ID of the target Azure resource where the document model should be copied to. Required. :vartype target_resource_id: str @@ -3097,18 +3297,26 @@ class ModelCopyAuthorization(_model_base.Model): :vartype expiration_date_time: ~datetime.datetime """ - target_resource_id: str = rest_field(name="targetResourceId") + target_resource_id: str = rest_field( + name="targetResourceId", visibility=["read", "create", "update", "delete", "query"] + ) """ID of the target Azure resource where the document model should be copied to. Required.""" - target_resource_region: str = rest_field(name="targetResourceRegion") + target_resource_region: str = rest_field( + name="targetResourceRegion", visibility=["read", "create", "update", "delete", "query"] + ) """Location of the target Azure resource where the document model should be copied to. Required.""" - target_model_id: str = rest_field(name="targetModelId") + target_model_id: str = rest_field(name="targetModelId", visibility=["read", "create", "update", "delete", "query"]) """Identifier of the target document model. Required.""" - target_model_location: str = rest_field(name="targetModelLocation") + target_model_location: str = rest_field( + name="targetModelLocation", visibility=["read", "create", "update", "delete", "query"] + ) """URL of the copied document model in the target account. Required.""" - access_token: str = rest_field(name="accessToken") + access_token: str = rest_field(name="accessToken", visibility=["read", "create", "update", "delete", "query"]) """Token used to authorize the request. 
Required.""" - expiration_date_time: datetime.datetime = rest_field(name="expirationDateTime", format="rfc3339") + expiration_date_time: datetime.datetime = rest_field( + name="expirationDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """Date/time when the access token expires. Required.""" @overload diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py index f7dd32510333..8bcb627aa475 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/azure/ai/documentintelligence/models/_patch.py @@ -1,7 +1,8 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_batch_documents_result.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_batch_documents_result.py new file mode 100644 index 000000000000..56b2c751607f --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_batch_documents_result.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python delete_analyze_batch_documents_result.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + client.delete_analyze_batch_result( + model_id="prebuilt-invoice", + result_id="3b31320d-8bab-4f88-b19c-2322a7f11034", + ) + + +# x-ms-original-file: 2025-09-01/DeleteAnalyzeBatchDocumentsResult.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_document_result.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_document_result.py new file mode 100644 index 000000000000..8913c87b0d67 --- /dev/null +++ 
b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_analyze_document_result.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python delete_analyze_document_result.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + client.delete_analyze_result( + model_id="myCustomModel", + result_id="3b31320d-8bab-4f88-b19c-2322a7f11034", + ) + + +# x-ms-original-file: 2025-09-01/DeleteAnalyzeDocumentResult.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_classifier.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_classifier.py new file mode 100644 index 000000000000..a52fb92ef21d --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_classifier.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python delete_document_classifier.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + client.delete_classifier( + classifier_id="myClassifier", + ) + + +# x-ms-original-file: 2025-09-01/DeleteDocumentClassifier.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_model.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_model.py new file mode 100644 index 000000000000..fd0d8d10f887 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/delete_document_model.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python delete_document_model.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + client.delete_model( + model_id="myCustomModel", + ) + + +# x-ms-original-file: 2025-09-01/DeleteDocumentModel.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_result.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_result.py new file mode 100644 index 000000000000..f89b399cc176 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_result.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_analyze_batch_documents_result.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_analyze_batch_result( + model_id="prebuilt-invoice", + result_id="3b31320d-8bab-4f88-b19c-2322a7f11034", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetAnalyzeBatchDocumentsResult.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_results.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_results.py new file mode 100644 index 000000000000..03095bb96914 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_batch_documents_results.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_analyze_batch_documents_results.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.list_analyze_batch_results( + model_id="prebuilt-invoice", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2025-09-01/GetAnalyzeBatchDocumentsResults.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_figure.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_figure.py new file mode 100644 index 000000000000..60f6c0e9f656 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_figure.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_analyze_document_result_figure.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_analyze_result_figure( + model_id="prebuilt-invoice", + result_id="3b31320d-8bab-4f88-b19c-2322a7f11034", + figure_id="1.0", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetAnalyzeDocumentResultFigure.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_fpdf.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_fpdf.py new file mode 100644 index 000000000000..cfc7b467ab1b --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_fpdf.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_analyze_document_result_fpdf.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_analyze_result_pdf( + model_id="prebuilt-invoice", + result_id="3b31320d-8bab-4f88-b19c-2322a7f11034", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetAnalyzeDocumentResultFPdf.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_png.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_png.py new file mode 100644 index 000000000000..6a948c9933c0 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_analyze_document_result_png.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_analyze_document_result_png.py +""" + + +def main(): + client = DocumentIntelligenceClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_analyze_result_png( + model_id="prebuilt-invoice", + result_id="3b31320d-8bab-4f88-b19c-2322a7f11034", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetAnalyzeDocumentResultPng.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifier.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifier.py new file mode 100644 index 000000000000..8bbac994af57 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifier.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_document_classifier.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_classifier( + classifier_id="myClassifier", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetDocumentClassifier.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifiers.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifiers.py new file mode 100644 index 000000000000..f0cfdacb247f --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_classifiers.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_document_classifiers.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.list_classifiers() + for item in response: + print(item) + + +# x-ms-original-file: 2025-09-01/GetDocumentClassifiers.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_custom.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_custom.py new file mode 100644 index 000000000000..89fedb080c63 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_custom.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_document_model_custom.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_model( + model_id="myCustomModel", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetDocumentModel_Custom.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_prebuilt.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_prebuilt.py new file mode 100644 index 000000000000..582dbeb3e465 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_model_prebuilt.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_document_model_prebuilt.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_model( + model_id="prebuilt-invoice", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetDocumentModel_Prebuilt.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_models.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_models.py new file mode 100644 index 000000000000..78e5351945bc --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_document_models.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_document_models.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.list_models() + for item in response: + print(item) + + +# x-ms-original-file: 2025-09-01/GetDocumentModels.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operation.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operation.py new file mode 100644 index 000000000000..413399ae0c0f --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operation.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_operation.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_operation( + operation_id="b704bb00-d130-4f3f-a1d8-ca96de3eabb4", + ) + print(response) + + +# x-ms-original-file: 2025-09-01/GetOperation.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operations.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operations.py new file mode 100644 index 000000000000..13cd0a6fe916 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_operations.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_operations.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.list_operations() + for item in response: + print(item) + + +# x-ms-original-file: 2025-09-01/GetOperations.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_resource_details.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_resource_details.py new file mode 100644 index 000000000000..f20054b16b28 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_samples/get_resource_details.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient + +""" +# PREREQUISITES + pip install azure-ai-documentintelligence +# USAGE + python get_resource_details.py +""" + + +def main(): + client = DocumentIntelligenceAdministrationClient( + endpoint="https://myendpoint.cognitiveservices.azure.com", + credential="CREDENTIAL", + ) + + response = client.get_resource_details() + print(response) + + +# x-ms-original-file: 2025-09-01/GetResourceDetails.json +if __name__ == "__main__": + main() diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py new file mode 100644 index 000000000000..d647c5afe1fc --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/conftest.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import os +import pytest +from dotenv import load_dotenv +from devtools_testutils import ( + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, + add_header_regex_sanitizer, +) + +load_dotenv() + + +# For security, please avoid record sensitive identity information in recordings +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + documentintelligence_subscription_id = os.environ.get( + "DOCUMENTINTELLIGENCE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000" + ) + documentintelligence_tenant_id = os.environ.get( + "DOCUMENTINTELLIGENCE_TENANT_ID", "00000000-0000-0000-0000-000000000000" + ) + documentintelligence_client_id = os.environ.get( + "DOCUMENTINTELLIGENCE_CLIENT_ID", "00000000-0000-0000-0000-000000000000" + ) + documentintelligence_client_secret = os.environ.get( + "DOCUMENTINTELLIGENCE_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=documentintelligence_subscription_id, value="00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer(regex=documentintelligence_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=documentintelligence_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=documentintelligence_client_secret, value="00000000-0000-0000-0000-000000000000") + + documentintelligenceadministration_subscription_id = os.environ.get( + "DOCUMENTINTELLIGENCEADMINISTRATION_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000" + ) + documentintelligenceadministration_tenant_id = os.environ.get( + "DOCUMENTINTELLIGENCEADMINISTRATION_TENANT_ID", "00000000-0000-0000-0000-000000000000" + ) + documentintelligenceadministration_client_id = os.environ.get( + "DOCUMENTINTELLIGENCEADMINISTRATION_CLIENT_ID", "00000000-0000-0000-0000-000000000000" + ) + 
documentintelligenceadministration_client_secret = os.environ.get( + "DOCUMENTINTELLIGENCEADMINISTRATION_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=documentintelligenceadministration_subscription_id, value="00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=documentintelligenceadministration_tenant_id, value="00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=documentintelligenceadministration_client_id, value="00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=documentintelligenceadministration_client_secret, value="00000000-0000-0000-0000-000000000000" + ) + + add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") + add_header_regex_sanitizer(key="Cookie", value="cookie;") + add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py new file mode 100644 index 000000000000..4048cea1f6c0 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence.py @@ -0,0 +1,139 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import DocumentIntelligenceClientTestBase, DocumentIntelligencePreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestDocumentIntelligence(DocumentIntelligenceClientTestBase): + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_begin_analyze_document(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.begin_analyze_document( + model_id="str", + body={"base64Source": bytes("bytes", encoding="utf-8"), "urlSource": "str"}, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_get_analyze_result_pdf(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.get_analyze_result_pdf( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_get_analyze_result_png(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.get_analyze_result_png( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_get_analyze_result_figure(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.get_analyze_result_figure( + model_id="str", + result_id="str", + figure_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_delete_analyze_result(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.delete_analyze_result( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_begin_analyze_batch_documents(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.begin_analyze_batch_documents( + model_id="str", + body={ + "resultContainerUrl": "str", + "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"}, + "azureBlobSource": {"containerUrl": "str", "prefix": "str"}, + "overwriteExisting": bool, + "resultPrefix": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_list_analyze_batch_results(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.list_analyze_batch_results( + model_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_delete_analyze_batch_result(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.delete_analyze_batch_result( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_get_analyze_batch_result(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.get_analyze_batch_result( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy + def test_begin_classify_document(self, documentintelligence_endpoint): + client = self.create_client(endpoint=documentintelligence_endpoint) + response = client.begin_classify_document( + classifier_id="str", + body={"base64Source": bytes("bytes", encoding="utf-8"), "urlSource": "str"}, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py new file mode 100644 index 000000000000..e1e2126bb6b6 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import DocumentIntelligenceAdministrationClientTestBase, DocumentIntelligenceAdministrationPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestDocumentIntelligenceAdministration(DocumentIntelligenceAdministrationClientTestBase): + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_begin_build_document_model(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.begin_build_document_model( + body={ + "buildMode": "str", + "modelId": "str", + "allowOverwrite": bool, + "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"}, + "azureBlobSource": {"containerUrl": "str", "prefix": "str"}, + "description": "str", + "maxTrainingHours": 0.0, + "tags": {"str": "str"}, + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_begin_compose_model(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.begin_compose_model( + body={ + "classifierId": "str", + "docTypes": { + "str": { + "buildMode": "str", + "confidenceThreshold": 0.0, + "description": "str", + "features": ["str"], + "fieldConfidence": {"str": 0.0}, + "fieldSchema": { + "str": { + "type": "str", + "description": "str", + "example": "str", + "items": ..., + "properties": {"str": ...}, + } + }, + "maxDocumentsToAnalyze": 0, + "modelId": "str", + "queryFields": ["str"], + } + }, + "modelId": "str", + "description": "str", + "split": "str", + "tags": {"str": "str"}, + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_authorize_model_copy(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.authorize_model_copy( + body={"modelId": "str", "description": "str", "tags": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_begin_copy_model_to(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.begin_copy_model_to( + model_id="str", + body={ + "accessToken": "str", + "expirationDateTime": "2020-02-20 00:00:00", + "targetModelId": "str", + "targetModelLocation": "str", + "targetResourceId": "str", + "targetResourceRegion": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_get_model(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.get_model( + model_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_list_models(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.list_models() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_delete_model(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.delete_model( + model_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_get_resource_details(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.get_resource_details() + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_get_operation(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.get_operation( + operation_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_list_operations(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.list_operations() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_begin_build_classifier(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.begin_build_classifier( + body={ + "classifierId": "str", + "docTypes": { + "str": { + "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"}, + "azureBlobSource": {"containerUrl": "str", "prefix": "str"}, + "sourceKind": "str", + } + }, + "allowOverwrite": bool, + "baseClassifierId": "str", + "description": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_authorize_classifier_copy(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.authorize_classifier_copy( + body={"classifierId": "str", "description": "str", "tags": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_begin_copy_classifier_to(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.begin_copy_classifier_to( + classifier_id="str", + body={ + "accessToken": "str", + "expirationDateTime": "2020-02-20 00:00:00", + "targetClassifierId": "str", + "targetClassifierLocation": "str", + "targetResourceId": "str", + "targetResourceRegion": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_get_classifier(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.get_classifier( + classifier_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_list_classifiers(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.list_classifiers() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy + def test_delete_classifier(self, documentintelligenceadministration_endpoint): + client = self.create_client(endpoint=documentintelligenceadministration_endpoint) + response = client.delete_classifier( + classifier_id="str", + ) + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py new file mode 100644 index 000000000000..1522ffe7c182 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_administration_async.py @@ -0,0 +1,256 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import DocumentIntelligenceAdministrationPreparer +from testpreparer_async import DocumentIntelligenceAdministrationClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestDocumentIntelligenceAdministrationAsync(DocumentIntelligenceAdministrationClientTestBaseAsync): + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_build_document_model(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await ( + await client.begin_build_document_model( + body={ + "buildMode": "str", + "modelId": "str", + "allowOverwrite": bool, + "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"}, + "azureBlobSource": {"containerUrl": "str", "prefix": "str"}, + "description": "str", + "maxTrainingHours": 0.0, + "tags": {"str": "str"}, + }, + ) + 
).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_compose_model(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await ( + await client.begin_compose_model( + body={ + "classifierId": "str", + "docTypes": { + "str": { + "buildMode": "str", + "confidenceThreshold": 0.0, + "description": "str", + "features": ["str"], + "fieldConfidence": {"str": 0.0}, + "fieldSchema": { + "str": { + "type": "str", + "description": "str", + "example": "str", + "items": ..., + "properties": {"str": ...}, + } + }, + "maxDocumentsToAnalyze": 0, + "modelId": "str", + "queryFields": ["str"], + } + }, + "modelId": "str", + "description": "str", + "split": "str", + "tags": {"str": "str"}, + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_authorize_model_copy(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.authorize_model_copy( + body={"modelId": "str", "description": "str", "tags": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_copy_model_to(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await ( + await client.begin_copy_model_to( + model_id="str", + body={ + "accessToken": "str", + "expirationDateTime": "2020-02-20 00:00:00", + "targetModelId": "str", + "targetModelLocation": "str", + "targetResourceId": "str", + "targetResourceRegion": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_model(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.get_model( + model_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_models(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = client.list_models() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_model(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.delete_model( + model_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_resource_details(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.get_resource_details() + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_operation(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.get_operation( + operation_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_operations(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = client.list_operations() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_build_classifier(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await ( + await client.begin_build_classifier( + body={ + "classifierId": "str", + "docTypes": { + "str": { + "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"}, + "azureBlobSource": {"containerUrl": "str", "prefix": "str"}, + "sourceKind": "str", + } + }, + "allowOverwrite": bool, + "baseClassifierId": "str", + "description": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_authorize_classifier_copy(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.authorize_classifier_copy( + body={"classifierId": "str", "description": "str", "tags": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_copy_classifier_to(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await ( + await client.begin_copy_classifier_to( + classifier_id="str", + body={ + "accessToken": "str", + "expirationDateTime": "2020-02-20 00:00:00", + "targetClassifierId": "str", + "targetClassifierLocation": "str", + "targetResourceId": "str", + "targetResourceRegion": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_classifier(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.get_classifier( + classifier_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_classifiers(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = client.list_classifiers() + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligenceAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_classifier(self, documentintelligenceadministration_endpoint): + client = self.create_async_client(endpoint=documentintelligenceadministration_endpoint) + response = await client.delete_classifier( + classifier_id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py new file mode 100644 index 000000000000..6cc54d1e90fb --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/test_document_intelligence_async.py @@ -0,0 +1,146 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import DocumentIntelligencePreparer +from testpreparer_async import DocumentIntelligenceClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestDocumentIntelligenceAsync(DocumentIntelligenceClientTestBaseAsync): + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_begin_analyze_document(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await ( + await client.begin_analyze_document( + model_id="str", + body={"base64Source": bytes("bytes", encoding="utf-8"), "urlSource": "str"}, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_get_analyze_result_pdf(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await client.get_analyze_result_pdf( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_get_analyze_result_png(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await client.get_analyze_result_png( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_get_analyze_result_figure(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await client.get_analyze_result_figure( + model_id="str", + result_id="str", + figure_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_delete_analyze_result(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await client.delete_analyze_result( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_begin_analyze_batch_documents(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await ( + await client.begin_analyze_batch_documents( + model_id="str", + body={ + "resultContainerUrl": "str", + "azureBlobFileListSource": {"containerUrl": "str", "fileList": "str"}, + "azureBlobSource": {"containerUrl": "str", "prefix": "str"}, + "overwriteExisting": bool, + "resultPrefix": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_list_analyze_batch_results(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = client.list_analyze_batch_results( + model_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
+ + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_delete_analyze_batch_result(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await client.delete_analyze_batch_result( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_get_analyze_batch_result(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await client.get_analyze_batch_result( + model_id="str", + result_id="str", + ) + + # please add some check logic here by yourself + # ... + + @DocumentIntelligencePreparer() + @recorded_by_proxy_async + async def test_begin_classify_document(self, documentintelligence_endpoint): + client = self.create_async_client(endpoint=documentintelligence_endpoint) + response = await ( + await client.begin_classify_document( + classifier_id="str", + body={"base64Source": bytes("bytes", encoding="utf-8"), "urlSource": "str"}, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py new file mode 100644 index 000000000000..ed3b18488bf5 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.ai.documentintelligence import DocumentIntelligenceAdministrationClient, DocumentIntelligenceClient +from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer +import functools + + +class DocumentIntelligenceClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(DocumentIntelligenceClient) + return self.create_client_from_credential( + DocumentIntelligenceClient, + credential=credential, + endpoint=endpoint, + ) + + +DocumentIntelligencePreparer = functools.partial( + PowerShellPreparer, + "documentintelligence", + documentintelligence_endpoint="https://fake_documentintelligence_endpoint.com", +) + + +class DocumentIntelligenceAdministrationClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(DocumentIntelligenceAdministrationClient) + return self.create_client_from_credential( + DocumentIntelligenceAdministrationClient, + credential=credential, + endpoint=endpoint, + ) + + +DocumentIntelligenceAdministrationPreparer = functools.partial( + PowerShellPreparer, + "documentintelligenceadministration", + documentintelligenceadministration_endpoint="https://fake_documentintelligenceadministration_endpoint.com", +) diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py new file mode 100644 index 000000000000..fece6c220e81 --- /dev/null +++ b/sdk/documentintelligence/azure-ai-documentintelligence/generated_tests/testpreparer_async.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.ai.documentintelligence.aio import DocumentIntelligenceAdministrationClient, DocumentIntelligenceClient +from devtools_testutils import AzureRecordedTestCase + + +class DocumentIntelligenceClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(DocumentIntelligenceClient, is_async=True) + return self.create_client_from_credential( + DocumentIntelligenceClient, + credential=credential, + endpoint=endpoint, + ) + + +class DocumentIntelligenceAdministrationClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(DocumentIntelligenceAdministrationClient, is_async=True) + return self.create_client_from_credential( + DocumentIntelligenceAdministrationClient, + credential=credential, + endpoint=endpoint, + ) diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_highres_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_highres_async.py index 660b3443a143..993b355efb71 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_highres_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_highres_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_languages_async.py 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_languages_async.py index 10b37ca1c30f..e15f3b9088b5 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_languages_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_languages_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_query_fields_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_query_fields_async.py index 0d4c09816c06..618efedc4602 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_query_fields_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_addon_query_fields_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_custom_documents_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_custom_documents_async.py index 495538737d76..4083374a3a89 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_custom_documents_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_custom_documents_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git 
a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_documents_output_in_markdown_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_documents_output_in_markdown_async.py index 43cdd0e4cc4e..530d757afaec 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_documents_output_in_markdown_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_documents_output_in_markdown_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_general_documents_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_general_documents_async.py index f077399a67a3..209bca6f121c 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_general_documents_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_general_documents_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_identity_documents_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_identity_documents_async.py index 19dea29eacca..a3913ffccf22 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_identity_documents_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_identity_documents_async.py @@ -1,3 
+1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_async.py index 4dfaeafcf081..a6f2b537c188 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_from_bytes_source_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_from_bytes_source_async.py index cf5eadb8f3ee..44a6c8a28c02 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_from_bytes_source_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_invoices_from_bytes_source_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_layout_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_layout_async.py index 249a235fdef0..a688fe3e0df8 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_layout_async.py +++ 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_layout_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_read_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_read_async.py index 84f437be7303..7435695537e3 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_read_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_read_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_async.py index 76f583b8d066..220c23a99d73 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_from_url_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_from_url_async.py index c52fa267ea60..2fbf45381a28 100644 --- 
a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_from_url_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_receipts_from_url_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_tax_us_w2_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_tax_us_w2_async.py index c9a67860808e..691a4fb8a183 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_tax_us_w2_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_analyze_tax_us_w2_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_classify_document_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_classify_document_async.py index b7c8f5610cb5..c5d2db5402c4 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_classify_document_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_classify_document_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_compose_model_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_compose_model_async.py index 
07f6647ac8fa..ace3c3a24ca1 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_compose_model_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_compose_model_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_copy_model_to_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_copy_model_to_async.py index de4af3f90530..c1fe59e5fd3b 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_copy_model_to_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_copy_model_to_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_get_raw_response_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_get_raw_response_async.py index a3642faf17ce..0dbd051a4ee2 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_get_raw_response_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_get_raw_response_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_classifiers_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_classifiers_async.py index 
edab9621b76a..bd54a0968725 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_classifiers_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_classifiers_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_models_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_models_async.py index d0464ca8cbd8..e7cc3b8d1f7c 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_models_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_manage_models_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_send_request_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_send_request_async.py index 1446c45b4f98..9bdfeefd2369 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_send_request_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/async_samples/sample_send_request_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_highres.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_highres.py index 94464e725bd2..569b1afa48a0 100644 --- 
a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_highres.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_highres.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_languages.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_languages.py index d45201e56d99..ab46bc77126c 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_languages.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_languages.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_query_fields.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_query_fields.py index 9600c62d2851..cb837b6a2375 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_query_fields.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_addon_query_fields.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_custom_documents.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_custom_documents.py index 139ba615febf..bb707fb254b8 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_custom_documents.py +++ 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_custom_documents.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_documents_output_in_markdown.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_documents_output_in_markdown.py index 204d3938eb8d..79e6978a93b5 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_documents_output_in_markdown.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_documents_output_in_markdown.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_general_documents.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_general_documents.py index 4891918861df..a0e561b53ec2 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_general_documents.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_general_documents.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_identity_documents.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_identity_documents.py index 2493cc0c7436..d641f2313151 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_identity_documents.py +++ 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_identity_documents.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices.py index fbad23cf1be8..4d6c653e3662 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices_from_bytes_source.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices_from_bytes_source.py index 9ab3d4d1b691..e015945f5e4a 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices_from_bytes_source.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_invoices_from_bytes_source.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_layout.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_layout.py index 0e42cf1288c6..f881b527730f 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_layout.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_layout.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression 
# coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts.py index 492ddbe8eaa1..d4c92fe4d516 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts_from_url.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts_from_url.py index a090ba55e597..2546ec9fdc1f 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts_from_url.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_receipts_from_url.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_tax_us_w2.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_tax_us_w2.py index 2fa1b3066bb9..b6116f32268d 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_tax_us_w2.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_analyze_tax_us_w2.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_classify_document.py 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_classify_document.py index 2fafe373726b..f9fbdb8b4037 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_classify_document.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_classify_document.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_compose_model.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_compose_model.py index b06258510624..1263905bf1d7 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_compose_model.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_compose_model.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_convert_to_and_from_dict.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_convert_to_and_from_dict.py index 76084da11cb4..0a42cb337e85 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_convert_to_and_from_dict.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_convert_to_and_from_dict.py @@ -54,7 +54,7 @@ def convert_to_and_from_dict(): # save the dictionary as JSON content in a JSON file with open("data.json", "w") as output_file: json.dump(analyze_result_dict, output_file, indent=4) - + # convert the dictionary back to the original model model = AnalyzeResult(analyze_result_dict) diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_copy_model_to.py 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_copy_model_to.py index 6b4dc609f829..1436a19ff6b0 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_copy_model_to.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_copy_model_to.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_get_raw_response.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_get_raw_response.py index b86478906f53..0f2bc4733ae0 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_get_raw_response.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_get_raw_response.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_classifiers.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_classifiers.py index 5754a0a482ad..1ff7591aa650 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_classifiers.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_classifiers.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_models.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_models.py index 29f3047a9d3e..8c06c6edbaa9 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_models.py +++ 
b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_manage_models.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_send_request.py b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_send_request.py index 9e1d71765e1d..bd7c1451246f 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_send_request.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/samples/sample_send_request.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/setup.py b/sdk/documentintelligence/azure-ai-documentintelligence/setup.py index efc94a219375..1f0db2b0f4c7 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/setup.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/setup.py @@ -5,7 +5,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -# coding: utf-8 + import os import re @@ -14,9 +14,10 @@ PACKAGE_NAME = "azure-ai-documentintelligence" PACKAGE_PPRINT_NAME = "Azure AI Document Intelligence" +PACKAGE_NAMESPACE = "azure.ai.documentintelligence" -# a-b-c => a/b/c -package_folder_path = PACKAGE_NAME.replace("-", "/") +# a.b.c => a/b/c +package_folder_path = PACKAGE_NAMESPACE.replace(".", "/") # Version extraction inspired from 'requests' with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: @@ -29,7 +30,7 @@ setup( name=PACKAGE_NAME, version=version, - description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + description="Microsoft Corporation {} Client Library for Python".format(PACKAGE_PPRINT_NAME), long_description=open("README.md", "r").read(), long_description_content_type="text/markdown", license="MIT License", @@ -42,11 +43,11 @@ "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "License :: OSI Approved :: MIT License", ], zip_safe=False, @@ -64,8 +65,8 @@ }, install_requires=[ "isodate>=0.6.1", - "azure-core>=1.30.0", + "azure-core>=1.35.0", "typing-extensions>=4.6.0", ], - python_requires=">=3.8", + python_requires=">=3.9", ) diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/tests/preparers.py b/sdk/documentintelligence/azure-ai-documentintelligence/tests/preparers.py index cf8df39488b0..fa1b49297612 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/tests/preparers.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/tests/preparers.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # 
coding: utf-8 # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents.py b/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents.py index ba0e36ca5faf..1840718540d7 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents_async.py b/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents_async.py index 15d23c91e953..51e2d96b5f71 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents_async.py +++ b/sdk/documentintelligence/azure-ai-documentintelligence/tests/test_dac_analyze_batch_documents_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. 
diff --git a/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml b/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml index 0d37317f42f6..8375e80ba005 100644 --- a/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml +++ b/sdk/documentintelligence/azure-ai-documentintelligence/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/DocumentIntelligence -commit: 9c6987bb92aef84ed46afe25f240e9c67a0ddf14 -additionalDirectories: [] +commit: c03820dacaeb1a058d4914fce296e31b160111dc repo: Azure/azure-rest-api-specs +additionalDirectories: