From b35ceb0dd56222d734d742a9dc0e98df50337325 Mon Sep 17 00:00:00 2001
From: SDKAuto
Date: Tue, 13 May 2025 21:51:02 +0000
Subject: [PATCH] CodeGen from PR 33408 in Azure/azure-rest-api-specs

Merge 54e99e7a19003586366a2a0efa32323a81e14641 into 7fc6689d84858b1c71b786526b04c014c4589968
---
 sdk/batch/azure-batch/MANIFEST.in             |    2 +-
 sdk/batch/azure-batch/_meta.json              |    6 +
 sdk/batch/azure-batch/apiview-properties.json |  359 +++
 sdk/batch/azure-batch/azure/batch/_client.py  |    3 +-
 .../azure/batch/_operations/_operations.py    |  244 +-
 .../azure/batch/_operations/_patch.py         |  561 +---
 sdk/batch/azure-batch/azure/batch/_patch.py   |  161 +-
 .../azure/batch/_utils/__init__.py            |    6 +
 .../{_model_base.py => _utils/model_base.py}  |   27 +-
 .../serialization.py}                         |   26 +-
 .../batch/{_vendor.py => _utils/utils.py}     |   16 +-
 sdk/batch/azure-batch/azure/batch/_version.py |    2 +-
 .../azure-batch/azure/batch/aio/_client.py    |    3 +-
 .../batch/aio/_operations/_operations.py      |  230 +-
 .../azure/batch/aio/_operations/_patch.py     |  539 +---
 .../azure-batch/azure/batch/aio/_patch.py     |   54 +-
 .../azure-batch/azure/batch/aio/_vendor.py    |   57 -
 .../azure/batch/models/__init__.py            |  170 +-
 .../azure-batch/azure/batch/models/_enums.py  |   74 +-
 .../azure-batch/azure/batch/models/_models.py | 2712 ++++++++---------
 .../azure-batch/azure/batch/models/_patch.py  |  121 +-
 sdk/batch/azure-batch/setup.py                |    7 +-
 sdk/batch/azure-batch/tests/test_batch.py     |    4 +-
 sdk/batch/azure-batch/tsp-location.yaml       |    2 +-
 24 files changed, 2114 insertions(+), 3272 deletions(-)
 create mode 100644 sdk/batch/azure-batch/_meta.json
 create mode 100644 sdk/batch/azure-batch/apiview-properties.json
 create mode 100644 sdk/batch/azure-batch/azure/batch/_utils/__init__.py
 rename sdk/batch/azure-batch/azure/batch/{_model_base.py => _utils/model_base.py} (98%)
 rename sdk/batch/azure-batch/azure/batch/{_serialization.py => _utils/serialization.py} (98%)
 rename sdk/batch/azure-batch/azure/batch/{_vendor.py => _utils/utils.py} (84%)
 delete mode 100644 sdk/batch/azure-batch/azure/batch/aio/_vendor.py

diff --git a/sdk/batch/azure-batch/MANIFEST.in b/sdk/batch/azure-batch/MANIFEST.in
index cb1e2b1128cb..06208ba9fde2 100644
--- a/sdk/batch/azure-batch/MANIFEST.in
+++ b/sdk/batch/azure-batch/MANIFEST.in
@@ -3,4 +3,4 @@ include LICENSE
 include azure/batch/py.typed
 recursive-include tests *.py
 recursive-include samples *.py *.md
-include azure/__init__.py
\ No newline at end of file
+include azure/__init__.py
diff --git a/sdk/batch/azure-batch/_meta.json b/sdk/batch/azure-batch/_meta.json
new file mode 100644
index 000000000000..44c9b37a1aa9
--- /dev/null
+++ b/sdk/batch/azure-batch/_meta.json
@@ -0,0 +1,6 @@
+{
+  "commit": "da5f436da0537251f7336b56f1e2df48c634d147",
+  "repository_url": "https://github.com/Azure/azure-rest-api-specs",
+  "typespec_src": "specification/batch/Azure.Batch",
+  "@azure-tools/typespec-python": "0.44.2"
+}
\ No newline at end of file
diff --git a/sdk/batch/azure-batch/apiview-properties.json b/sdk/batch/azure-batch/apiview-properties.json
new file mode 100644
index 000000000000..5767cdc46000
--- /dev/null
+++ b/sdk/batch/azure-batch/apiview-properties.json
@@ -0,0 +1,359 @@
+{
+  "CrossLanguagePackageId": "Azure.Batch",
+  "CrossLanguageDefinitionId": {
+    "azure.batch.models.AuthenticationTokenSettings": "Azure.Batch.AuthenticationTokenSettings",
+    "azure.batch.models.AutomaticOsUpgradePolicy": "Azure.Batch.AutomaticOsUpgradePolicy",
+    "azure.batch.models.AutoScaleRun": "Azure.Batch.AutoScaleRun",
+    "azure.batch.models.AutoScaleRunError": "Azure.Batch.AutoScaleRunError",
"Azure.Batch.AutoScaleRunError", + "azure.batch.models.AutoUserSpecification": "Azure.Batch.AutoUserSpecification", + "azure.batch.models.AzureBlobFileSystemConfiguration": "Azure.Batch.AzureBlobFileSystemConfiguration", + "azure.batch.models.AzureFileShareConfiguration": "Azure.Batch.AzureFileShareConfiguration", + "azure.batch.models.BatchAffinityInfo": "Azure.Batch.BatchAffinityInfo", + "azure.batch.models.BatchApplication": "Azure.Batch.BatchApplication", + "azure.batch.models.BatchApplicationPackageReference": "Azure.Batch.BatchApplicationPackageReference", + "azure.batch.models.BatchAutoPoolSpecification": "Azure.Batch.BatchAutoPoolSpecification", + "azure.batch.models.BatchCertificate": "Azure.Batch.BatchCertificate", + "azure.batch.models.BatchCertificateDeleteError": "Azure.Batch.BatchCertificateDeleteError", + "azure.batch.models.BatchCertificateReference": "Azure.Batch.BatchCertificateReference", + "azure.batch.models.BatchContainerConfiguration": "Azure.Batch.BatchContainerConfiguration", + "azure.batch.models.BatchCreateTaskCollectionResult": "Azure.Batch.BatchCreateTaskCollectionResult", + "azure.batch.models.BatchDiffDiskSettings": "Azure.Batch.BatchDiffDiskSettings", + "azure.batch.models.BatchError": "Azure.Batch.BatchError", + "azure.batch.models.BatchErrorDetail": "Azure.Batch.BatchErrorDetail", + "azure.batch.models.BatchErrorMessage": "Azure.Batch.BatchErrorMessage", + "azure.batch.models.BatchInboundNatPool": "Azure.Batch.BatchInboundNatPool", + "azure.batch.models.BatchJob": "Azure.Batch.BatchJob", + "azure.batch.models.BatchJobConstraints": "Azure.Batch.BatchJobConstraints", + "azure.batch.models.BatchJobCreateOptions": "Azure.Batch.BatchJobCreateOptions", + "azure.batch.models.BatchJobDisableOptions": "Azure.Batch.BatchJobDisableOptions", + "azure.batch.models.BatchJobExecutionInfo": "Azure.Batch.BatchJobExecutionInfo", + "azure.batch.models.BatchJobManagerTask": "Azure.Batch.BatchJobManagerTask", + "azure.batch.models.BatchJobNetworkConfiguration": "Azure.Batch.BatchJobNetworkConfiguration", + "azure.batch.models.BatchJobPreparationAndReleaseTaskStatus": "Azure.Batch.BatchJobPreparationAndReleaseTaskStatus", + "azure.batch.models.BatchJobPreparationTask": "Azure.Batch.BatchJobPreparationTask", + "azure.batch.models.BatchJobPreparationTaskExecutionInfo": "Azure.Batch.BatchJobPreparationTaskExecutionInfo", + "azure.batch.models.BatchJobReleaseTask": "Azure.Batch.BatchJobReleaseTask", + "azure.batch.models.BatchJobReleaseTaskExecutionInfo": "Azure.Batch.BatchJobReleaseTaskExecutionInfo", + "azure.batch.models.BatchJobSchedule": "Azure.Batch.BatchJobSchedule", + "azure.batch.models.BatchJobScheduleConfiguration": "Azure.Batch.BatchJobScheduleConfiguration", + "azure.batch.models.BatchJobScheduleCreateOptions": "Azure.Batch.BatchJobScheduleCreateOptions", + "azure.batch.models.BatchJobScheduleExecutionInfo": "Azure.Batch.BatchJobScheduleExecutionInfo", + "azure.batch.models.BatchJobScheduleStatistics": "Azure.Batch.BatchJobScheduleStatistics", + "azure.batch.models.BatchJobScheduleUpdateOptions": "Azure.Batch.BatchJobScheduleUpdateOptions", + "azure.batch.models.BatchJobSchedulingError": "Azure.Batch.BatchJobSchedulingError", + "azure.batch.models.BatchJobSpecification": "Azure.Batch.BatchJobSpecification", + "azure.batch.models.BatchJobStatistics": "Azure.Batch.BatchJobStatistics", + "azure.batch.models.BatchJobTerminateOptions": "Azure.Batch.BatchJobTerminateOptions", + "azure.batch.models.BatchJobUpdateOptions": "Azure.Batch.BatchJobUpdateOptions", + 
"azure.batch.models.BatchMetadataItem": "Azure.Batch.BatchMetadataItem", + "azure.batch.models.BatchNode": "Azure.Batch.BatchNode", + "azure.batch.models.BatchNodeAgentInfo": "Azure.Batch.BatchNodeAgentInfo", + "azure.batch.models.BatchNodeCounts": "Azure.Batch.BatchNodeCounts", + "azure.batch.models.BatchNodeDeallocateOptions": "Azure.Batch.BatchNodeDeallocateOptions", + "azure.batch.models.BatchNodeDisableSchedulingOptions": "Azure.Batch.BatchNodeDisableSchedulingOptions", + "azure.batch.models.BatchNodeEndpointConfiguration": "Azure.Batch.BatchNodeEndpointConfiguration", + "azure.batch.models.BatchNodeError": "Azure.Batch.BatchNodeError", + "azure.batch.models.BatchNodeFile": "Azure.Batch.BatchNodeFile", + "azure.batch.models.BatchNodeIdentityReference": "Azure.Batch.BatchNodeIdentityReference", + "azure.batch.models.BatchNodeInfo": "Azure.Batch.BatchNodeInfo", + "azure.batch.models.BatchNodePlacementConfiguration": "Azure.Batch.BatchNodePlacementConfiguration", + "azure.batch.models.BatchNodeRebootOptions": "Azure.Batch.BatchNodeRebootOptions", + "azure.batch.models.BatchNodeReimageOptions": "Azure.Batch.BatchNodeReimageOptions", + "azure.batch.models.BatchNodeRemoteLoginSettings": "Azure.Batch.BatchNodeRemoteLoginSettings", + "azure.batch.models.BatchNodeRemoveOptions": "Azure.Batch.BatchNodeRemoveOptions", + "azure.batch.models.BatchNodeUserCreateOptions": "Azure.Batch.BatchNodeUserCreateOptions", + "azure.batch.models.BatchNodeUserUpdateOptions": "Azure.Batch.BatchNodeUserUpdateOptions", + "azure.batch.models.BatchNodeVMExtension": "Azure.Batch.BatchNodeVMExtension", + "azure.batch.models.BatchOsDisk": "Azure.Batch.BatchOsDisk", + "azure.batch.models.BatchPool": "Azure.Batch.BatchPool", + "azure.batch.models.BatchPoolAutoScaleEnableOptions": "Azure.Batch.BatchPoolAutoScaleEnableOptions", + "azure.batch.models.BatchPoolAutoScaleEvaluateOptions": "Azure.Batch.BatchPoolAutoScaleEvaluateOptions", + "azure.batch.models.BatchPoolCreateOptions": "Azure.Batch.BatchPoolCreateOptions", + "azure.batch.models.BatchPoolEndpointConfiguration": "Azure.Batch.BatchPoolEndpointConfiguration", + "azure.batch.models.BatchPoolIdentity": "Azure.Batch.BatchPoolIdentity", + "azure.batch.models.BatchPoolInfo": "Azure.Batch.BatchPoolInfo", + "azure.batch.models.BatchPoolNodeCounts": "Azure.Batch.BatchPoolNodeCounts", + "azure.batch.models.BatchPoolReplaceOptions": "Azure.Batch.BatchPoolReplaceOptions", + "azure.batch.models.BatchPoolResizeOptions": "Azure.Batch.BatchPoolResizeOptions", + "azure.batch.models.BatchPoolResourceStatistics": "Azure.Batch.BatchPoolResourceStatistics", + "azure.batch.models.BatchPoolSpecification": "Azure.Batch.BatchPoolSpecification", + "azure.batch.models.BatchPoolStatistics": "Azure.Batch.BatchPoolStatistics", + "azure.batch.models.BatchPoolUpdateOptions": "Azure.Batch.BatchPoolUpdateOptions", + "azure.batch.models.BatchPoolUsageMetrics": "Azure.Batch.BatchPoolUsageMetrics", + "azure.batch.models.BatchPoolUsageStatistics": "Azure.Batch.BatchPoolUsageStatistics", + "azure.batch.models.BatchPublicIpAddressConfiguration": "Azure.Batch.BatchPublicIpAddressConfiguration", + "azure.batch.models.BatchStartTask": "Azure.Batch.BatchStartTask", + "azure.batch.models.BatchStartTaskInfo": "Azure.Batch.BatchStartTaskInfo", + "azure.batch.models.BatchSubtask": "Azure.Batch.BatchSubtask", + "azure.batch.models.BatchSupportedImage": "Azure.Batch.BatchSupportedImage", + "azure.batch.models.BatchTask": "Azure.Batch.BatchTask", + "azure.batch.models.BatchTaskConstraints": 
"Azure.Batch.BatchTaskConstraints", + "azure.batch.models.BatchTaskContainerExecutionInfo": "Azure.Batch.BatchTaskContainerExecutionInfo", + "azure.batch.models.BatchTaskContainerSettings": "Azure.Batch.BatchTaskContainerSettings", + "azure.batch.models.BatchTaskCounts": "Azure.Batch.BatchTaskCounts", + "azure.batch.models.BatchTaskCountsResult": "Azure.Batch.BatchTaskCountsResult", + "azure.batch.models.BatchTaskCreateOptions": "Azure.Batch.BatchTaskCreateOptions", + "azure.batch.models.BatchTaskCreateResult": "Azure.Batch.BatchTaskCreateResult", + "azure.batch.models.BatchTaskDependencies": "Azure.Batch.BatchTaskDependencies", + "azure.batch.models.BatchTaskExecutionInfo": "Azure.Batch.BatchTaskExecutionInfo", + "azure.batch.models.BatchTaskFailureInfo": "Azure.Batch.BatchTaskFailureInfo", + "azure.batch.models.BatchTaskGroup": "Azure.Batch.BatchTaskGroup", + "azure.batch.models.BatchTaskIdRange": "Azure.Batch.BatchTaskIdRange", + "azure.batch.models.BatchTaskInfo": "Azure.Batch.BatchTaskInfo", + "azure.batch.models.BatchTaskSchedulingPolicy": "Azure.Batch.BatchTaskSchedulingPolicy", + "azure.batch.models.BatchTaskSlotCounts": "Azure.Batch.BatchTaskSlotCounts", + "azure.batch.models.BatchTaskStatistics": "Azure.Batch.BatchTaskStatistics", + "azure.batch.models.BatchUefiSettings": "Azure.Batch.BatchUefiSettings", + "azure.batch.models.BatchUserAssignedIdentity": "Azure.Batch.BatchUserAssignedIdentity", + "azure.batch.models.BatchVmImageReference": "Azure.Batch.BatchVmImageReference", + "azure.batch.models.CifsMountConfiguration": "Azure.Batch.CifsMountConfiguration", + "azure.batch.models.ContainerHostBatchBindMountEntry": "Azure.Batch.ContainerHostBatchBindMountEntry", + "azure.batch.models.ContainerRegistryReference": "Azure.Batch.ContainerRegistryReference", + "azure.batch.models.DataDisk": "Azure.Batch.DataDisk", + "azure.batch.models.DiskEncryptionConfiguration": "Azure.Batch.DiskEncryptionConfiguration", + "azure.batch.models.EnvironmentSetting": "Azure.Batch.EnvironmentSetting", + "azure.batch.models.ExitCodeMapping": "Azure.Batch.ExitCodeMapping", + "azure.batch.models.ExitCodeRangeMapping": "Azure.Batch.ExitCodeRangeMapping", + "azure.batch.models.ExitConditions": "Azure.Batch.ExitConditions", + "azure.batch.models.ExitOptions": "Azure.Batch.ExitOptions", + "azure.batch.models.FileProperties": "Azure.Batch.FileProperties", + "azure.batch.models.InboundEndpoint": "Azure.Batch.InboundEndpoint", + "azure.batch.models.InstanceViewStatus": "Azure.Batch.InstanceViewStatus", + "azure.batch.models.LinuxUserConfiguration": "Azure.Batch.LinuxUserConfiguration", + "azure.batch.models.ManagedDisk": "Azure.Batch.ManagedDisk", + "azure.batch.models.MountConfiguration": "Azure.Batch.MountConfiguration", + "azure.batch.models.MultiInstanceSettings": "Azure.Batch.MultiInstanceSettings", + "azure.batch.models.NameValuePair": "Azure.Batch.NameValuePair", + "azure.batch.models.NetworkConfiguration": "Azure.Batch.NetworkConfiguration", + "azure.batch.models.NetworkSecurityGroupRule": "Azure.Batch.NetworkSecurityGroupRule", + "azure.batch.models.NfsMountConfiguration": "Azure.Batch.NfsMountConfiguration", + "azure.batch.models.OutputFile": "Azure.Batch.OutputFile", + "azure.batch.models.OutputFileBlobContainerDestination": "Azure.Batch.OutputFileBlobContainerDestination", + "azure.batch.models.OutputFileDestination": "Azure.Batch.OutputFileDestination", + "azure.batch.models.OutputFileUploadConfig": "Azure.Batch.OutputFileUploadConfig", + "azure.batch.models.OutputFileUploadHeader": 
"Azure.Batch.OutputFileUploadHeader", + "azure.batch.models.RecentBatchJob": "Azure.Batch.RecentBatchJob", + "azure.batch.models.ResizeError": "Azure.Batch.ResizeError", + "azure.batch.models.ResourceFile": "Azure.Batch.ResourceFile", + "azure.batch.models.RollingUpgradePolicy": "Azure.Batch.RollingUpgradePolicy", + "azure.batch.models.SecurityProfile": "Azure.Batch.SecurityProfile", + "azure.batch.models.ServiceArtifactReference": "Azure.Batch.ServiceArtifactReference", + "azure.batch.models.UpgradePolicy": "Azure.Batch.UpgradePolicy", + "azure.batch.models.UploadBatchServiceLogsOptions": "Azure.Batch.UploadBatchServiceLogsOptions", + "azure.batch.models.UploadBatchServiceLogsResult": "Azure.Batch.UploadBatchServiceLogsResult", + "azure.batch.models.UserAccount": "Azure.Batch.UserAccount", + "azure.batch.models.UserIdentity": "Azure.Batch.UserIdentity", + "azure.batch.models.VirtualMachineConfiguration": "Azure.Batch.VirtualMachineConfiguration", + "azure.batch.models.VirtualMachineInfo": "Azure.Batch.VirtualMachineInfo", + "azure.batch.models.VmDiskSecurityProfile": "Azure.Batch.BatchVMDiskSecurityProfile", + "azure.batch.models.VMExtension": "Azure.Batch.VMExtension", + "azure.batch.models.VMExtensionInstanceView": "Azure.Batch.VMExtensionInstanceView", + "azure.batch.models.WindowsConfiguration": "Azure.Batch.WindowsConfiguration", + "azure.batch.models.WindowsUserConfiguration": "Azure.Batch.WindowsUserConfiguration", + "azure.batch.models.CachingType": "Azure.Batch.CachingType", + "azure.batch.models.StorageAccountType": "Azure.Batch.StorageAccountType", + "azure.batch.models.ContainerType": "Azure.Batch.ContainerType", + "azure.batch.models.DiskEncryptionTarget": "Azure.Batch.DiskEncryptionTarget", + "azure.batch.models.BatchNodePlacementPolicyType": "Azure.Batch.BatchNodePlacementPolicyType", + "azure.batch.models.DiffDiskPlacement": "Azure.Batch.DiffDiskPlacement", + "azure.batch.models.SecurityEncryptionTypes": "Azure.Batch.SecurityEncryptionTypes", + "azure.batch.models.SecurityTypes": "Azure.Batch.SecurityTypes", + "azure.batch.models.DynamicVNetAssignmentScope": "Azure.Batch.DynamicVNetAssignmentScope", + "azure.batch.models.InboundEndpointProtocol": "Azure.Batch.InboundEndpointProtocol", + "azure.batch.models.NetworkSecurityGroupRuleAccess": "Azure.Batch.NetworkSecurityGroupRuleAccess", + "azure.batch.models.IpAddressProvisioningType": "Azure.Batch.IpAddressProvisioningType", + "azure.batch.models.ContainerWorkingDirectory": "Azure.Batch.ContainerWorkingDirectory", + "azure.batch.models.ContainerHostDataPath": "Azure.Batch.ContainerHostDataPath", + "azure.batch.models.AutoUserScope": "Azure.Batch.AutoUserScope", + "azure.batch.models.ElevationLevel": "Azure.Batch.ElevationLevel", + "azure.batch.models.BatchCertificateStoreLocation": "Azure.Batch.BatchCertificateStoreLocation", + "azure.batch.models.BatchCertificateVisibility": "Azure.Batch.BatchCertificateVisibility", + "azure.batch.models.BatchNodeFillType": "Azure.Batch.BatchNodeFillType", + "azure.batch.models.LoginMode": "Azure.Batch.LoginMode", + "azure.batch.models.BatchNodeCommunicationMode": "Azure.Batch.BatchNodeCommunicationMode", + "azure.batch.models.UpgradeMode": "Azure.Batch.UpgradeMode", + "azure.batch.models.BatchPoolState": "Azure.Batch.BatchPoolState", + "azure.batch.models.AllocationState": "Azure.Batch.AllocationState", + "azure.batch.models.BatchPoolIdentityType": "Azure.Batch.BatchPoolIdentityType", + "azure.batch.models.BatchNodeDeallocationOption": "Azure.Batch.BatchNodeDeallocationOption", + 
"azure.batch.models.OSType": "Azure.Batch.OSType", + "azure.batch.models.ImageVerificationType": "Azure.Batch.ImageVerificationType", + "azure.batch.models.BatchJobState": "Azure.Batch.BatchJobState", + "azure.batch.models.OutputFileUploadCondition": "Azure.Batch.OutputFileUploadCondition", + "azure.batch.models.BatchAccessScope": "Azure.Batch.BatchAccessScope", + "azure.batch.models.BatchPoolLifetimeOption": "Azure.Batch.BatchPoolLifetimeOption", + "azure.batch.models.BatchAllTasksCompleteMode": "Azure.Batch.BatchAllTasksCompleteMode", + "azure.batch.models.BatchTaskFailureMode": "Azure.Batch.BatchTaskFailureMode", + "azure.batch.models.BatchErrorSourceCategory": "Azure.Batch.BatchErrorSourceCategory", + "azure.batch.models.DisableBatchJobOption": "Azure.Batch.DisableBatchJobOption", + "azure.batch.models.BatchJobPreparationTaskState": "Azure.Batch.BatchJobPreparationTaskState", + "azure.batch.models.BatchTaskExecutionResult": "Azure.Batch.BatchTaskExecutionResult", + "azure.batch.models.BatchJobReleaseTaskState": "Azure.Batch.BatchJobReleaseTaskState", + "azure.batch.models.BatchCertificateState": "Azure.Batch.BatchCertificateState", + "azure.batch.models.BatchCertificateFormat": "Azure.Batch.BatchCertificateFormat", + "azure.batch.models.BatchJobScheduleState": "Azure.Batch.BatchJobScheduleState", + "azure.batch.models.BatchJobActionKind": "Azure.Batch.BatchJobActionKind", + "azure.batch.models.DependencyAction": "Azure.Batch.DependencyAction", + "azure.batch.models.BatchTaskState": "Azure.Batch.BatchTaskState", + "azure.batch.models.BatchTaskAddStatus": "Azure.Batch.BatchTaskAddStatus", + "azure.batch.models.BatchSubtaskState": "Azure.Batch.BatchSubtaskState", + "azure.batch.models.BatchNodeState": "Azure.Batch.BatchNodeState", + "azure.batch.models.SchedulingState": "Azure.Batch.SchedulingState", + "azure.batch.models.BatchStartTaskState": "Azure.Batch.BatchStartTaskState", + "azure.batch.models.BatchNodeRebootKind": "Azure.Batch.BatchNodeRebootKind", + "azure.batch.models.BatchNodeReimageOption": "Azure.Batch.BatchNodeReimageOption", + "azure.batch.models.BatchNodeDeallocateOption": "Azure.Batch.BatchNodeDeallocateOption", + "azure.batch.models.BatchNodeDisableSchedulingOption": "Azure.Batch.BatchNodeDisableSchedulingOption", + "azure.batch.models.StatusLevelTypes": "Azure.Batch.StatusLevelTypes", + "azure.batch.BatchClient.list_applications": "Client.BatchClient.listApplications", + "azure.batch.aio.BatchClient.list_applications": "Client.BatchClient.listApplications", + "azure.batch.BatchClient.get_application": "Client.BatchClient.getApplication", + "azure.batch.aio.BatchClient.get_application": "Client.BatchClient.getApplication", + "azure.batch.BatchClient.list_pool_usage_metrics": "Client.BatchClient.listPoolUsageMetrics", + "azure.batch.aio.BatchClient.list_pool_usage_metrics": "Client.BatchClient.listPoolUsageMetrics", + "azure.batch.BatchClient.create_pool": "Client.BatchClient.createPool", + "azure.batch.aio.BatchClient.create_pool": "Client.BatchClient.createPool", + "azure.batch.BatchClient.list_pools": "Client.BatchClient.listPools", + "azure.batch.aio.BatchClient.list_pools": "Client.BatchClient.listPools", + "azure.batch.BatchClient.delete_pool": "Client.BatchClient.deletePool", + "azure.batch.aio.BatchClient.delete_pool": "Client.BatchClient.deletePool", + "azure.batch.BatchClient.pool_exists": "Client.BatchClient.poolExists", + "azure.batch.aio.BatchClient.pool_exists": "Client.BatchClient.poolExists", + "azure.batch.BatchClient.get_pool": 
"Client.BatchClient.getPool", + "azure.batch.aio.BatchClient.get_pool": "Client.BatchClient.getPool", + "azure.batch.BatchClient.update_pool": "Client.BatchClient.updatePool", + "azure.batch.aio.BatchClient.update_pool": "Client.BatchClient.updatePool", + "azure.batch.BatchClient.disable_pool_auto_scale": "Client.BatchClient.disablePoolAutoScale", + "azure.batch.aio.BatchClient.disable_pool_auto_scale": "Client.BatchClient.disablePoolAutoScale", + "azure.batch.BatchClient.enable_pool_auto_scale": "Client.BatchClient.enablePoolAutoScale", + "azure.batch.aio.BatchClient.enable_pool_auto_scale": "Client.BatchClient.enablePoolAutoScale", + "azure.batch.BatchClient.evaluate_pool_auto_scale": "Client.BatchClient.evaluatePoolAutoScale", + "azure.batch.aio.BatchClient.evaluate_pool_auto_scale": "Client.BatchClient.evaluatePoolAutoScale", + "azure.batch.BatchClient.resize_pool": "Client.BatchClient.resizePool", + "azure.batch.aio.BatchClient.resize_pool": "Client.BatchClient.resizePool", + "azure.batch.BatchClient.stop_pool_resize": "Client.BatchClient.stopPoolResize", + "azure.batch.aio.BatchClient.stop_pool_resize": "Client.BatchClient.stopPoolResize", + "azure.batch.BatchClient.replace_pool_properties": "Client.BatchClient.replacePoolProperties", + "azure.batch.aio.BatchClient.replace_pool_properties": "Client.BatchClient.replacePoolProperties", + "azure.batch.BatchClient.remove_nodes": "Client.BatchClient.removeNodes", + "azure.batch.aio.BatchClient.remove_nodes": "Client.BatchClient.removeNodes", + "azure.batch.BatchClient.list_supported_images": "Client.BatchClient.listSupportedImages", + "azure.batch.aio.BatchClient.list_supported_images": "Client.BatchClient.listSupportedImages", + "azure.batch.BatchClient.list_pool_node_counts": "Client.BatchClient.listPoolNodeCounts", + "azure.batch.aio.BatchClient.list_pool_node_counts": "Client.BatchClient.listPoolNodeCounts", + "azure.batch.BatchClient.delete_job": "Client.BatchClient.deleteJob", + "azure.batch.aio.BatchClient.delete_job": "Client.BatchClient.deleteJob", + "azure.batch.BatchClient.get_job": "Client.BatchClient.getJob", + "azure.batch.aio.BatchClient.get_job": "Client.BatchClient.getJob", + "azure.batch.BatchClient.update_job": "Client.BatchClient.updateJob", + "azure.batch.aio.BatchClient.update_job": "Client.BatchClient.updateJob", + "azure.batch.BatchClient.replace_job": "Client.BatchClient.replaceJob", + "azure.batch.aio.BatchClient.replace_job": "Client.BatchClient.replaceJob", + "azure.batch.BatchClient.disable_job": "Client.BatchClient.disableJob", + "azure.batch.aio.BatchClient.disable_job": "Client.BatchClient.disableJob", + "azure.batch.BatchClient.enable_job": "Client.BatchClient.enableJob", + "azure.batch.aio.BatchClient.enable_job": "Client.BatchClient.enableJob", + "azure.batch.BatchClient.terminate_job": "Client.BatchClient.terminateJob", + "azure.batch.aio.BatchClient.terminate_job": "Client.BatchClient.terminateJob", + "azure.batch.BatchClient.create_job": "Client.BatchClient.createJob", + "azure.batch.aio.BatchClient.create_job": "Client.BatchClient.createJob", + "azure.batch.BatchClient.list_jobs": "Client.BatchClient.listJobs", + "azure.batch.aio.BatchClient.list_jobs": "Client.BatchClient.listJobs", + "azure.batch.BatchClient.list_jobs_from_schedule": "Client.BatchClient.listJobsFromSchedule", + "azure.batch.aio.BatchClient.list_jobs_from_schedule": "Client.BatchClient.listJobsFromSchedule", + "azure.batch.BatchClient.list_job_preparation_and_release_task_status": 
"Client.BatchClient.listJobPreparationAndReleaseTaskStatus", + "azure.batch.aio.BatchClient.list_job_preparation_and_release_task_status": "Client.BatchClient.listJobPreparationAndReleaseTaskStatus", + "azure.batch.BatchClient.get_job_task_counts": "Client.BatchClient.getJobTaskCounts", + "azure.batch.aio.BatchClient.get_job_task_counts": "Client.BatchClient.getJobTaskCounts", + "azure.batch.BatchClient.create_certificate": "Client.BatchClient.createCertificate", + "azure.batch.aio.BatchClient.create_certificate": "Client.BatchClient.createCertificate", + "azure.batch.BatchClient.list_certificates": "Client.BatchClient.listCertificates", + "azure.batch.aio.BatchClient.list_certificates": "Client.BatchClient.listCertificates", + "azure.batch.BatchClient.cancel_certificate_deletion": "Client.BatchClient.cancelCertificateDeletion", + "azure.batch.aio.BatchClient.cancel_certificate_deletion": "Client.BatchClient.cancelCertificateDeletion", + "azure.batch.BatchClient.delete_certificate": "Client.BatchClient.deleteCertificate", + "azure.batch.aio.BatchClient.delete_certificate": "Client.BatchClient.deleteCertificate", + "azure.batch.BatchClient.get_certificate": "Client.BatchClient.getCertificate", + "azure.batch.aio.BatchClient.get_certificate": "Client.BatchClient.getCertificate", + "azure.batch.BatchClient.job_schedule_exists": "Client.BatchClient.jobScheduleExists", + "azure.batch.aio.BatchClient.job_schedule_exists": "Client.BatchClient.jobScheduleExists", + "azure.batch.BatchClient.delete_job_schedule": "Client.BatchClient.deleteJobSchedule", + "azure.batch.aio.BatchClient.delete_job_schedule": "Client.BatchClient.deleteJobSchedule", + "azure.batch.BatchClient.get_job_schedule": "Client.BatchClient.getJobSchedule", + "azure.batch.aio.BatchClient.get_job_schedule": "Client.BatchClient.getJobSchedule", + "azure.batch.BatchClient.update_job_schedule": "Client.BatchClient.updateJobSchedule", + "azure.batch.aio.BatchClient.update_job_schedule": "Client.BatchClient.updateJobSchedule", + "azure.batch.BatchClient.replace_job_schedule": "Client.BatchClient.replaceJobSchedule", + "azure.batch.aio.BatchClient.replace_job_schedule": "Client.BatchClient.replaceJobSchedule", + "azure.batch.BatchClient.disable_job_schedule": "Client.BatchClient.disableJobSchedule", + "azure.batch.aio.BatchClient.disable_job_schedule": "Client.BatchClient.disableJobSchedule", + "azure.batch.BatchClient.enable_job_schedule": "Client.BatchClient.enableJobSchedule", + "azure.batch.aio.BatchClient.enable_job_schedule": "Client.BatchClient.enableJobSchedule", + "azure.batch.BatchClient.terminate_job_schedule": "Client.BatchClient.terminateJobSchedule", + "azure.batch.aio.BatchClient.terminate_job_schedule": "Client.BatchClient.terminateJobSchedule", + "azure.batch.BatchClient.create_job_schedule": "Client.BatchClient.createJobSchedule", + "azure.batch.aio.BatchClient.create_job_schedule": "Client.BatchClient.createJobSchedule", + "azure.batch.BatchClient.list_job_schedules": "Client.BatchClient.listJobSchedules", + "azure.batch.aio.BatchClient.list_job_schedules": "Client.BatchClient.listJobSchedules", + "azure.batch.BatchClient.create_task": "Client.BatchClient.createTask", + "azure.batch.aio.BatchClient.create_task": "Client.BatchClient.createTask", + "azure.batch.BatchClient.list_tasks": "Client.BatchClient.listTasks", + "azure.batch.aio.BatchClient.list_tasks": "Client.BatchClient.listTasks", + "azure.batch.BatchClient.create_task_collection": "Client.BatchClient.createTaskCollection", + 
"azure.batch.aio.BatchClient.create_task_collection": "Client.BatchClient.createTaskCollection", + "azure.batch.BatchClient.delete_task": "Client.BatchClient.deleteTask", + "azure.batch.aio.BatchClient.delete_task": "Client.BatchClient.deleteTask", + "azure.batch.BatchClient.get_task": "Client.BatchClient.getTask", + "azure.batch.aio.BatchClient.get_task": "Client.BatchClient.getTask", + "azure.batch.BatchClient.replace_task": "Client.BatchClient.replaceTask", + "azure.batch.aio.BatchClient.replace_task": "Client.BatchClient.replaceTask", + "azure.batch.BatchClient.list_sub_tasks": "Client.BatchClient.listSubTasks", + "azure.batch.aio.BatchClient.list_sub_tasks": "Client.BatchClient.listSubTasks", + "azure.batch.BatchClient.terminate_task": "Client.BatchClient.terminateTask", + "azure.batch.aio.BatchClient.terminate_task": "Client.BatchClient.terminateTask", + "azure.batch.BatchClient.reactivate_task": "Client.BatchClient.reactivateTask", + "azure.batch.aio.BatchClient.reactivate_task": "Client.BatchClient.reactivateTask", + "azure.batch.BatchClient.delete_task_file": "Client.BatchClient.deleteTaskFile", + "azure.batch.aio.BatchClient.delete_task_file": "Client.BatchClient.deleteTaskFile", + "azure.batch.BatchClient.get_task_file": "Client.BatchClient.getTaskFile", + "azure.batch.aio.BatchClient.get_task_file": "Client.BatchClient.getTaskFile", + "azure.batch.BatchClient.list_task_files": "Client.BatchClient.listTaskFiles", + "azure.batch.aio.BatchClient.list_task_files": "Client.BatchClient.listTaskFiles", + "azure.batch.BatchClient.create_node_user": "Client.BatchClient.createNodeUser", + "azure.batch.aio.BatchClient.create_node_user": "Client.BatchClient.createNodeUser", + "azure.batch.BatchClient.delete_node_user": "Client.BatchClient.deleteNodeUser", + "azure.batch.aio.BatchClient.delete_node_user": "Client.BatchClient.deleteNodeUser", + "azure.batch.BatchClient.replace_node_user": "Client.BatchClient.replaceNodeUser", + "azure.batch.aio.BatchClient.replace_node_user": "Client.BatchClient.replaceNodeUser", + "azure.batch.BatchClient.get_node": "Client.BatchClient.getNode", + "azure.batch.aio.BatchClient.get_node": "Client.BatchClient.getNode", + "azure.batch.BatchClient.reboot_node": "Client.BatchClient.rebootNode", + "azure.batch.aio.BatchClient.reboot_node": "Client.BatchClient.rebootNode", + "azure.batch.BatchClient.start_node": "Client.BatchClient.startNode", + "azure.batch.aio.BatchClient.start_node": "Client.BatchClient.startNode", + "azure.batch.BatchClient.reimage_node": "Client.BatchClient.reimageNode", + "azure.batch.aio.BatchClient.reimage_node": "Client.BatchClient.reimageNode", + "azure.batch.BatchClient.deallocate_node": "Client.BatchClient.deallocateNode", + "azure.batch.aio.BatchClient.deallocate_node": "Client.BatchClient.deallocateNode", + "azure.batch.BatchClient.disable_node_scheduling": "Client.BatchClient.disableNodeScheduling", + "azure.batch.aio.BatchClient.disable_node_scheduling": "Client.BatchClient.disableNodeScheduling", + "azure.batch.BatchClient.enable_node_scheduling": "Client.BatchClient.enableNodeScheduling", + "azure.batch.aio.BatchClient.enable_node_scheduling": "Client.BatchClient.enableNodeScheduling", + "azure.batch.BatchClient.get_node_remote_login_settings": "Client.BatchClient.getNodeRemoteLoginSettings", + "azure.batch.aio.BatchClient.get_node_remote_login_settings": "Client.BatchClient.getNodeRemoteLoginSettings", + "azure.batch.BatchClient.upload_node_logs": "Client.BatchClient.uploadNodeLogs", + 
"azure.batch.aio.BatchClient.upload_node_logs": "Client.BatchClient.uploadNodeLogs", + "azure.batch.BatchClient.list_nodes": "Client.BatchClient.listNodes", + "azure.batch.aio.BatchClient.list_nodes": "Client.BatchClient.listNodes", + "azure.batch.BatchClient.get_node_extension": "Client.BatchClient.getNodeExtension", + "azure.batch.aio.BatchClient.get_node_extension": "Client.BatchClient.getNodeExtension", + "azure.batch.BatchClient.list_node_extensions": "Client.BatchClient.listNodeExtensions", + "azure.batch.aio.BatchClient.list_node_extensions": "Client.BatchClient.listNodeExtensions", + "azure.batch.BatchClient.delete_node_file": "Client.BatchClient.deleteNodeFile", + "azure.batch.aio.BatchClient.delete_node_file": "Client.BatchClient.deleteNodeFile", + "azure.batch.BatchClient.get_node_file": "Client.BatchClient.getNodeFile", + "azure.batch.aio.BatchClient.get_node_file": "Client.BatchClient.getNodeFile", + "azure.batch.BatchClient.list_node_files": "Client.BatchClient.listNodeFiles", + "azure.batch.aio.BatchClient.list_node_files": "Client.BatchClient.listNodeFiles" + } +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/azure/batch/_client.py b/sdk/batch/azure-batch/azure/batch/_client.py index 4ce5c282dbf4..3657c6c6ed45 100644 --- a/sdk/batch/azure-batch/azure/batch/_client.py +++ b/sdk/batch/azure-batch/azure/batch/_client.py @@ -16,7 +16,7 @@ from ._configuration import BatchClientConfiguration from ._operations import BatchClientOperationsMixin -from ._serialization import Deserializer, Serializer +from ._utils.serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential @@ -39,6 +39,7 @@ class BatchClient(BatchClientOperationsMixin): def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: _endpoint = "{endpoint}" self._config = BatchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + kwargs["request_id_header_name"] = "client-request-id" _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py index 14c180e935e8..e621753632b5 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py @@ -6,13 +6,13 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime import json -import sys from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, TypeVar import urllib.parse -from azure.core import MatchConditions +from azure.core import MatchConditions, PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,14 +31,11 @@ from azure.core.utils import case_insensitive_dict from .. 
import models as _models -from .._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize -from .._serialization import Serializer -from .._vendor import BatchClientMixinABC, prep_if_match, prep_if_none_match - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore +from .._configuration import BatchClientConfiguration +from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from .._utils.serialization import Serializer +from .._utils.utils import ClientMixinABC, prep_if_match, prep_if_none_match + T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] @@ -478,11 +475,11 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -560,11 +557,11 @@ def build_batch_resize_pool_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -690,11 +687,11 @@ def build_batch_remove_nodes_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -1013,11 +1010,11 @@ def build_batch_disable_job_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: 
_headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -1114,11 +1111,11 @@ def build_batch_terminate_job_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -2453,7 +2450,7 @@ def build_batch_get_task_file_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_batch_get_task_file_properties_request( # pylint: disable=name-too-long +def build_batch_get_task_file_properties_internal_request( # pylint: disable=name-too-long job_id: str, task_id: str, file_path: str, @@ -3209,7 +3206,7 @@ def build_batch_get_node_file_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_batch_get_node_file_properties_request( # pylint: disable=name-too-long +def build_batch_get_node_file_properties_internal_request( # pylint: disable=name-too-long pool_id: str, node_id: str, file_path: str, @@ -3298,7 +3295,9 @@ def build_batch_list_node_files_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-many-public-methods +class BatchClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], BatchClientConfiguration] +): @distributed_trace def list_applications( @@ -3387,7 +3386,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -3610,7 +3609,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -3636,7 +3635,7 @@ def get_next(next_link=None): @distributed_trace def create_pool( # pylint: disable=inconsistent-return-statements self, - pool: _models.BatchPoolCreateContent, + pool: _models.BatchPoolCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -3649,7 +3648,7 @@ def create_pool( # 
pylint: disable=inconsistent-return-statements to Microsoft Support engineers. :param pool: The Pool to be created. Required. - :type pool: ~azure.batch.models.BatchPoolCreateContent + :type pool: ~azure.batch.models.BatchPoolCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -3814,7 +3813,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPool], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -4177,7 +4176,7 @@ def get_pool( def update_pool( # pylint: disable=inconsistent-return-statements self, pool_id: str, - pool: _models.BatchPoolUpdateContent, + pool: _models.BatchPoolUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4196,7 +4195,7 @@ def update_pool( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param pool: The pool properties to update. Required. - :type pool: ~azure.batch.models.BatchPoolUpdateContent + :type pool: ~azure.batch.models.BatchPoolUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -4363,7 +4362,7 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements self, pool_id: str, - content: _models.BatchPoolEnableAutoScaleContent, + enable_auto_scale_options: _models.BatchPoolAutoScaleEnableOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4384,8 +4383,8 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :param content: The options to use for enabling automatic scaling. Required. - :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent + :param enable_auto_scale_options: The options to use for enabling automatic scaling. Required. + :type enable_auto_scale_options: ~azure.batch.models.BatchPoolAutoScaleEnableOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
@@ -4435,7 +4434,7 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(enable_auto_scale_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_enable_pool_auto_scale_request( pool_id=pool_id, @@ -4482,7 +4481,7 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements def evaluate_pool_auto_scale( self, pool_id: str, - content: _models.BatchPoolEvaluateAutoScaleContent, + evaluate_auto_scale_options: _models.BatchPoolAutoScaleEvaluateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4497,8 +4496,9 @@ def evaluate_pool_auto_scale( :param pool_id: The ID of the Pool on which to evaluate the automatic scaling formula. Required. :type pool_id: str - :param content: The options to use for evaluating the automatic scaling formula. Required. - :type content: ~azure.batch.models.BatchPoolEvaluateAutoScaleContent + :param evaluate_auto_scale_options: The options to use for evaluating the automatic scaling + formula. Required. + :type evaluate_auto_scale_options: ~azure.batch.models.BatchPoolAutoScaleEvaluateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -4527,7 +4527,7 @@ def evaluate_pool_auto_scale( ) cls: ClsType[_models.AutoScaleRun] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluate_auto_scale_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_evaluate_pool_auto_scale_request( pool_id=pool_id, @@ -4582,7 +4582,7 @@ def evaluate_pool_auto_scale( def resize_pool( # pylint: disable=inconsistent-return-statements self, pool_id: str, - content: _models.BatchPoolResizeContent, + resize_options: _models.BatchPoolResizeOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4604,8 +4604,8 @@ def resize_pool( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :param content: The options to use for resizing the pool. Required. - :type content: ~azure.batch.models.BatchPoolResizeContent + :param resize_options: The options to use for resizing the pool. Required. + :type resize_options: ~azure.batch.models.BatchPoolResizeOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
@@ -4655,7 +4655,7 @@ def resize_pool( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(resize_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_resize_pool_request( pool_id=pool_id, @@ -4812,7 +4812,7 @@ def stop_pool_resize( # pylint: disable=inconsistent-return-statements def replace_pool_properties( # pylint: disable=inconsistent-return-statements self, pool_id: str, - pool: _models.BatchPoolReplaceContent, + pool: _models.BatchPoolReplaceOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4827,7 +4827,7 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to update. Required. :type pool_id: str :param pool: The options to use for replacing properties on the pool. Required. - :type pool: ~azure.batch.models.BatchPoolReplaceContent + :type pool: ~azure.batch.models.BatchPoolReplaceOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -4899,7 +4899,7 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements def remove_nodes( # pylint: disable=inconsistent-return-statements self, pool_id: str, - content: _models.BatchNodeRemoveContent, + remove_options: _models.BatchNodeRemoveOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4917,8 +4917,8 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :param content: The options to use for removing the node. Required. - :type content: ~azure.batch.models.BatchNodeRemoveContent + :param remove_options: The options to use for removing the node. Required. + :type remove_options: ~azure.batch.models.BatchNodeRemoveOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
@@ -4968,7 +4968,7 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(remove_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_remove_nodes_request( pool_id=pool_id, @@ -5101,7 +5101,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -5214,7 +5214,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -5477,7 +5477,7 @@ def get_job( def update_job( # pylint: disable=inconsistent-return-statements self, job_id: str, - job: _models.BatchJobUpdateContent, + job: _models.BatchJobUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -5496,7 +5496,7 @@ def update_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job whose properties you want to update. Required. :type job_id: str :param job: The options to use for updating the Job. Required. - :type job: ~azure.batch.models.BatchJobUpdateContent + :type job: ~azure.batch.models.BatchJobUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -5709,7 +5709,7 @@ def replace_job( # pylint: disable=inconsistent-return-statements def disable_job( # pylint: disable=inconsistent-return-statements self, job_id: str, - content: _models.BatchJobDisableContent, + disable_options: _models.BatchJobDisableOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -5732,8 +5732,8 @@ def disable_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to disable. Required. :type job_id: str - :param content: The options to use for disabling the Job. Required. - :type content: ~azure.batch.models.BatchJobDisableContent + :param disable_options: The options to use for disabling the Job. Required. + :type disable_options: ~azure.batch.models.BatchJobDisableOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
@@ -5783,7 +5783,7 @@ def disable_job( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(disable_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_disable_job_request( job_id=job_id, @@ -5939,7 +5939,7 @@ def enable_job( # pylint: disable=inconsistent-return-statements def terminate_job( # pylint: disable=inconsistent-return-statements self, job_id: str, - parameters: Optional[_models.BatchJobTerminateContent] = None, + options: Optional[_models.BatchJobTerminateOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -5961,8 +5961,8 @@ def terminate_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to terminate. Required. :type job_id: str - :param parameters: The options to use for terminating the Job. Default value is None. - :type parameters: ~azure.batch.models.BatchJobTerminateContent + :param options: The options to use for terminating the Job. Default value is None. + :type options: ~azure.batch.models.BatchJobTerminateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -6015,8 +6015,8 @@ def terminate_job( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -6065,7 +6065,7 @@ def terminate_job( # pylint: disable=inconsistent-return-statements @distributed_trace def create_job( # pylint: disable=inconsistent-return-statements self, - job: _models.BatchJobCreateContent, + job: _models.BatchJobCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -6084,7 +6084,7 @@ def create_job( # pylint: disable=inconsistent-return-statements engineers. :param job: The Job to be created. Required. - :type job: ~azure.batch.models.BatchJobCreateContent + :type job: ~azure.batch.models.BatchJobCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
@@ -6249,7 +6249,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6375,7 +6375,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6503,7 +6503,9 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6785,7 +6787,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6991,7 +6993,7 @@ def get_certificate( ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any - ) -> _models.GetCertificateResponse: + ) -> _models.BatchCertificate: """Gets information about the specified Certificate. :param thumbprint_algorithm: The algorithm used to derive the thumbprint parameter. This must @@ -7009,8 +7011,8 @@ def get_certificate( :paramtype ocpdate: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] - :return: GetCertificateResponse. The GetCertificateResponse is compatible with MutableMapping - :rtype: ~azure.batch.models.GetCertificateResponse + :return: BatchCertificate. 
The BatchCertificate is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchCertificate :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -7024,7 +7026,7 @@ def get_certificate( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetCertificateResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCertificate] = kwargs.pop("cls", None) _request = build_batch_get_certificate_request( thumbprint_algorithm=thumbprint_algorithm, @@ -7067,7 +7069,7 @@ def get_certificate( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetCertificateResponse, response.json()) + deserialized = _deserialize(_models.BatchCertificate, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -7414,7 +7416,7 @@ def get_job_schedule( def update_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, - job_schedule: _models.BatchJobScheduleUpdateContent, + job_schedule: _models.BatchJobScheduleUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -7435,7 +7437,7 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to update. Required. :type job_schedule_id: str :param job_schedule: The options to use for updating the Job Schedule. Required. - :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateContent + :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -7967,7 +7969,7 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements @distributed_trace def create_job_schedule( # pylint: disable=inconsistent-return-statements self, - job_schedule: _models.BatchJobScheduleCreateContent, + job_schedule: _models.BatchJobScheduleCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -7978,7 +7980,7 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements Creates a Job Schedule to the specified Account. :param job_schedule: The Job Schedule to be created. Required. - :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateContent + :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
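Every pager in this patch swaps `deserialized["value"]` for `deserialized.get("value", [])`. The effect is that a page payload missing the `value` array now yields an empty page instead of raising KeyError mid-iteration. A standalone illustration; `extract_values` is an invented stand-in for the generated `extract_data` closures:

    from typing import Any, Dict, List

    def extract_values(page: Dict[str, Any]) -> List[Any]:
        # page["value"] would raise KeyError on a payload with no "value" key;
        # .get with a list default keeps iteration alive and yields an empty page.
        return page.get("value", [])

    assert extract_values({"odata.nextLink": None}) == []     # tolerated after this patch
    assert extract_values({"value": [1, 2, 3]}) == [1, 2, 3]  # unchanged happy path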
@@ -8143,7 +8145,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -8170,7 +8172,7 @@ def get_next(next_link=None): def create_task( # pylint: disable=inconsistent-return-statements self, job_id: str, - task: _models.BatchTaskCreateContent, + task: _models.BatchTaskCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -8185,7 +8187,7 @@ def create_task( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to which the Task is to be created. Required. :type job_id: str :param task: The Task to be created. Required. - :type task: ~azure.batch.models.BatchTaskCreateContent + :type task: ~azure.batch.models.BatchTaskCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -8357,7 +8359,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchTask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -8389,7 +8391,7 @@ def create_task_collection( timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: + ) -> _models.BatchCreateTaskCollectionResult: """Adds a collection of Tasks to the specified Job. Note that each Task must have a unique ID. The Batch service may not return the @@ -8419,9 +8421,9 @@ def create_task_collection( current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :return: BatchTaskAddCollectionResult. The BatchTaskAddCollectionResult is compatible with - MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult + :return: BatchCreateTaskCollectionResult. 
The BatchCreateTaskCollectionResult is compatible + with MutableMapping + :rtype: ~azure.batch.models.BatchCreateTaskCollectionResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -8438,7 +8440,7 @@ def create_task_collection( content_type: str = kwargs.pop( "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") ) - cls: ClsType[_models.BatchTaskAddCollectionResult] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCreateTaskCollectionResult] = kwargs.pop("cls", None) _content = json.dumps(task_collection, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore @@ -8483,7 +8485,7 @@ def create_task_collection( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.BatchTaskAddCollectionResult, response.json()) + deserialized = _deserialize(_models.BatchCreateTaskCollectionResult, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -8936,7 +8938,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -9389,8 +9391,6 @@ def get_task_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace def _get_task_file_properties_internal( self, @@ -9447,7 +9447,7 @@ def _get_task_file_properties_internal( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_batch_get_task_file_properties_request( + _request = build_batch_get_task_file_properties_internal_request( job_id=job_id, task_id=task_id, file_path=file_path, @@ -9597,7 +9597,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -9625,7 +9625,7 @@ def create_node_user( # pylint: disable=inconsistent-return-statements self, pool_id: str, node_id: str, - user: _models.BatchNodeUserCreateContent, + user: _models.BatchNodeUserCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -9641,7 +9641,7 @@ def create_node_user( # pylint: disable=inconsistent-return-statements :param node_id: The ID of the machine on which you want to create a user Account. Required. :type node_id: str :param user: The options to use for creating the user. Required. - :type user: ~azure.batch.models.BatchNodeUserCreateContent + :type user: ~azure.batch.models.BatchNodeUserCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
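The bulk-add hunks rename the result model from BatchTaskAddCollectionResult to BatchCreateTaskCollectionResult and the per-task body to BatchTaskCreateOptions. Reusing the hypothetical client from the earlier sketch, a sketch of the call after this patch; wrapping the tasks in BatchTaskGroup is inferred from the removed retry helper later in this patch, and the command line is a placeholder:

    tasks = [
        _models.BatchTaskCreateOptions(id=f"task-{i}", command_line="/bin/sh -c 'echo hello'")
        for i in range(3)
    ]
    result: _models.BatchCreateTaskCollectionResult = client.create_task_collection(
        job_id="my-job",                                      # hypothetical job ID
        task_collection=_models.BatchTaskGroup(value=tasks),  # wrapper model assumed
    )
    for task_result in result.value or []:
        # .task_id and .status mirror the fields read by the removed retry helper below.
        print(task_result.task_id, task_result.status)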
@@ -9797,7 +9797,7 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements pool_id: str, node_id: str, user_name: str, - content: _models.BatchNodeUserUpdateContent, + update_options: _models.BatchNodeUserUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -9816,8 +9816,8 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user_name: The name of the user Account to update. Required. :type user_name: str - :param content: The options to use for updating the user. Required. - :type content: ~azure.batch.models.BatchNodeUserUpdateContent + :param update_options: The options to use for updating the user. Required. + :type update_options: ~azure.batch.models.BatchNodeUserUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -9846,7 +9846,7 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(update_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_replace_node_user_request( pool_id=pool_id, @@ -9986,7 +9986,7 @@ def reboot_node( # pylint: disable=inconsistent-return-statements self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeRebootContent] = None, + options: Optional[_models.BatchNodeRebootOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -10000,8 +10000,8 @@ def reboot_node( # pylint: disable=inconsistent-return-statements :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :param parameters: The options to use for rebooting the Compute Node. Default value is None. - :type parameters: ~azure.batch.models.BatchNodeRebootContent + :param options: The options to use for rebooting the Compute Node. Default value is None. + :type options: ~azure.batch.models.BatchNodeRebootOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -10030,8 +10030,8 @@ def reboot_node( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -10157,7 +10157,7 @@ def reimage_node( # pylint: disable=inconsistent-return-statements self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeReimageContent] = None, + options: Optional[_models.BatchNodeReimageOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -10173,8 +10173,8 @@ def reimage_node( # pylint: disable=inconsistent-return-statements :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :param parameters: The options to use for reimaging the Compute Node. Default value is None. 
- :type parameters: ~azure.batch.models.BatchNodeReimageContent + :param options: The options to use for reimaging the Compute Node. Default value is None. + :type options: ~azure.batch.models.BatchNodeReimageOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -10203,8 +10203,8 @@ def reimage_node( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -10251,7 +10251,7 @@ def deallocate_node( # pylint: disable=inconsistent-return-statements self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeDeallocateContent] = None, + options: Optional[_models.BatchNodeDeallocateOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -10265,8 +10265,8 @@ def deallocate_node( # pylint: disable=inconsistent-return-statements :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :param parameters: The options to use for deallocating the Compute Node. Default value is None. - :type parameters: ~azure.batch.models.BatchNodeDeallocateContent + :param options: The options to use for deallocating the Compute Node. Default value is None. + :type options: ~azure.batch.models.BatchNodeDeallocateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -10295,8 +10295,8 @@ def deallocate_node( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -10343,7 +10343,7 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeDisableSchedulingContent] = None, + options: Optional[_models.BatchNodeDisableSchedulingOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -10359,9 +10359,9 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements :param node_id: The ID of the Compute Node on which you want to disable Task scheduling. Required. :type node_id: str - :param parameters: The options to use for disabling scheduling on the Compute Node. Default - value is None. - :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent + :param options: The options to use for disabling scheduling on the Compute Node. Default value + is None. + :type options: ~azure.batch.models.BatchNodeDisableSchedulingOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
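All four node lifecycle operations above (reboot, reimage, deallocate, disable scheduling) now share one shape: an optional *Options body named `options`, serialized only when it is not None. Continuing with the hypothetical client, both forms below are valid; the pool and node IDs and the `node_reboot_option` field and value are assumptions:

    # No body at all: the operation sends _content = None, as in the hunk above.
    client.reboot_node("my-pool", "node-1")

    # Explicit body: the renamed *Options model replaces the old *Content one.
    client.reboot_node(
        "my-pool",
        "node-1",
        options=_models.BatchNodeRebootOptions(node_reboot_option="requeue"),
    )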
@@ -10390,8 +10390,8 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -10612,7 +10612,7 @@ def upload_node_logs( self, pool_id: str, node_id: str, - content: _models.UploadBatchServiceLogsContent, + upload_options: _models.UploadBatchServiceLogsOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -10631,8 +10631,8 @@ def upload_node_logs( :param node_id: The ID of the Compute Node for which you want to get the Remote Desktop Protocol file. Required. :type node_id: str - :param content: The Azure Batch service log files upload options. Required. - :type content: ~azure.batch.models.UploadBatchServiceLogsContent + :param upload_options: The Azure Batch service log files upload options. Required. + :type upload_options: ~azure.batch.models.UploadBatchServiceLogsOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -10662,7 +10662,7 @@ def upload_node_logs( ) cls: ClsType[_models.UploadBatchServiceLogsResult] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(upload_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_upload_node_logs_request( pool_id=pool_id, @@ -10811,7 +10811,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNode], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -11028,7 +11028,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -11256,8 +11256,6 @@ def get_node_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace def _get_node_file_properties_internal( self, @@ -11314,7 +11312,7 @@ def _get_node_file_properties_internal( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_batch_get_node_file_properties_request( + _request = build_batch_get_node_file_properties_internal_request( pool_id=pool_id, node_id=node_id, file_path=file_path, @@ -11462,7 +11460,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return 
deserialized.get("odata.nextLink") or None, iter(list_of_elem) diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py index 4e0857b30791..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py @@ -1,391 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -from typing import Any, Deque, List, Optional, Iterable, Iterator, overload -import collections -import logging -import threading +from typing import List -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.rest import HttpResponse -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - # create_task_collection renamed - @distributed_trace - def create_tasks( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. 
- :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrencies: number of threads to use in parallel when adding tasks. If specified - and greater than 0, will start additional threads to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrencies: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) - - # deque operations(append/pop) are thread-safe - results_queue: Deque[_models.BatchTaskAddResult] = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - self, job_id=job_id, task_collection=task_collection, **kwargs - ) - - # multi-threaded behavior - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - active_threads = [] - for i in range(concurrencies): - active_threads.append( - threading.Thread( - target=task_workflow_manager.task_collection_thread_handler, - args=(results_queue,), - ) - ) - active_threads[-1].start() - for thread in active_threads: - thread.join() - # single-threaded behavior - else: - task_workflow_manager.task_collection_thread_handler(results_queue) - - # Only define error if all threads have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - @distributed_trace - def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> Iterator[bytes]: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_node_file(*args, **kwargs) - - @distributed_trace - def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_node_file_properties_internal( # type: ignore - pool_id, - node_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_task_file_properties_internal( # type: ignore - job_id, - task_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> Iterator[bytes]: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_task_file(*args, **kwargs) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -395,174 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -class _TaskWorkflowManager: - """Worker class for one create_task_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`BatchTaskAddCollectionResult - ` - """ - - def __init__( - self, - batch_client: BatchClientOperationsMixin, - job_id: str, - task_collection: Iterable[_models.BatchTaskCreateContent], - **kwargs - ): - # Append operations thread safe - Only read once all threads have completed - # List of tasks which failed to add due to a returned client error - self.failure_tasks: Deque[_models.BatchTaskAddResult] = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors: Deque[Any] = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - self._error_lock = threading.Lock() - self._max_tasks_lock = threading.Lock() - self._pending_queue_lock = threading.Lock() - - # Variables to be used for task create_task_collection requests - self._batch_client = batch_client - self._job_id = job_id - - self._kwargs = kwargs - - def _bulk_add_tasks(self, results_queue, chunk_tasks_to_add): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~BatchTaskAddResult] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - self._batch_client.create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskGroup(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - # Restrict one thread at a time to do this compare and set, - # therefore forcing max_tasks_per_request to be strictly decreasing - with self._max_tasks_lock: - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another thread . 
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - try: - create_task_collection_response = create_task_collection_response.output - except AttributeError: - pass - if create_task_collection_response.value: - for task_result in create_task_collection_response.value: # pylint: disable=no-member - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - with self._pending_queue_lock: - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not (task_result.error and task_result.error.code == "TaskExists") - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - def task_collection_thread_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - with self._pending_queue_lock: - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - self._bulk_add_tasks(results_queue, chunk_tasks_to_add) - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/_patch.py b/sdk/batch/azure-batch/azure/batch/_patch.py index a9f1f6eeca2a..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_patch.py @@ -1,162 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import base64 -import hmac -import hashlib -import importlib -from datetime import datetime -from typing import TYPE_CHECKING, TypeVar, Any, Union +from typing import List -from azure.core.pipeline.policies import SansIOHTTPPolicy -from azure.core.credentials import AzureNamedKeyCredential, TokenCredential -from azure.core.pipeline import PipelineResponse, PipelineRequest -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest - -from ._client import BatchClient as GenerateBatchClient -from ._serialization import ( - Serializer, - TZ_UTC, -) - -try: - from urlparse import urlparse, parse_qs -except ImportError: - from urllib.parse import urlparse, parse_qs -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Optional, TypeVar, Union - - from azure.core.credentials import TokenCredential - from azure.core.pipeline import PipelineRequest - - ClientType = TypeVar("ClientType", bound="BatchClient") - T = TypeVar("T") - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - - -class BatchSharedKeyAuthPolicy(SansIOHTTPPolicy): - - headers_to_sign = [ - "content-encoding", - "content-language", - "content-length", - "content-md5", - "content-type", - "date", - "if-modified-since", - "if-match", - "if-none-match", - "if-unmodified-since", - "range", - ] - - def __init__(self, credential: AzureNamedKeyCredential): - super(BatchSharedKeyAuthPolicy, self).__init__() - self._account_name = credential.named_key.name - self._key = credential.named_key.key - - def on_request(self, request: PipelineRequest): - if not request.http_request.headers.get("ocp-date"): - now = datetime.utcnow() - now = now.replace(tzinfo=TZ_UTC) - request.http_request.headers["ocp-date"] = Serializer.serialize_rfc(now) - url = urlparse(request.http_request.url) - uri_path = url.path - - # method to sign - string_to_sign = request.http_request.method + "\n" - - # get headers to sign - request_header_dict = {key.lower(): val for key, val in request.http_request.headers.items() if val} - - if request.http_request.method not in ["GET", "HEAD"]: - if "content-length" not in request_header_dict: - request_header_dict["content-length"] = "0" - - request_headers = [str(request_header_dict.get(x, "")) for x in self.headers_to_sign] - - string_to_sign += "\n".join(request_headers) + "\n" - - # get ocp- header to sign - ocp_headers = [] - for name, value in request.http_request.headers.items(): - if "ocp-" in name and value: - ocp_headers.append((name.lower(), value)) - for name, value in sorted(ocp_headers): - string_to_sign += "{}:{}\n".format(name, value) - # get account_name and uri path to sign - string_to_sign += "/{}{}".format(self._account_name, uri_path) - - # get query string to sign if it is not table service - query_to_sign = parse_qs(url.query) - - for name in sorted(query_to_sign.keys()): - value = query_to_sign[name][0] - if value: - string_to_sign += "\n{}:{}".format(name, value) - # sign the request - auth_string = "SharedKey {}:{}".format(self._account_name, self._sign_string(string_to_sign)) - - request.http_request.headers["Authorization"] = auth_string - - return super().on_request(request) - - def _sign_string(self, string_to_sign): - - _key = self._key.encode("utf-8") 
- string_to_sign = string_to_sign.encode("utf-8") - - try: - key = base64.b64decode(_key) - except TypeError: - raise ValueError("Invalid key value: {}".format(self._key)) - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - - return base64.b64encode(digest).decode("utf-8") - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Web PubSub service instance. - :type endpoint: str - :param hub: Target hub name, which should start with alphabetic characters and only contain - alpha-numeric characters or underscore. - :type hub: str - :param credentials: Credential needed for the client to connect to Azure. - :type credentials: ~azure.identity.ClientSecretCredential, ~azure.core.credentials.AzureNamedKeyCredential, - or ~azure.identity.TokenCredentials - :keyword api_version: Api Version. The default value is "2021-10-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: Union[AzureNamedKeyCredential, TokenCredential], **kwargs): - super().__init__( - endpoint=endpoint, - credential=credential, # type: ignore - authentication_policy=kwargs.pop( - "authentication_policy", self._format_shared_key_credential("", credential) - ), - **kwargs - ) - - def _format_shared_key_credential(self, account_name, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/_utils/__init__.py b/sdk/batch/azure-batch/azure/batch/_utils/__init__.py new file mode 100644 index 000000000000..8026245c2abc --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_utils/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- diff --git a/sdk/batch/azure-batch/azure/batch/_model_base.py b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py similarity index 98% rename from sdk/batch/azure-batch/azure/batch/_model_base.py rename to sdk/batch/azure-batch/azure/batch/_utils/model_base.py index 3072ee252ed9..49d5c7259389 100644 --- a/sdk/batch/azure-batch/azure/batch/_model_base.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py @@ -2,8 +2,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=protected-access, broad-except @@ -21,6 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET +from collections.abc import MutableMapping from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -28,11 +30,6 @@ from azure.core.pipeline import PipelineResponse from azure.core.serialization import _Null -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping - _LOGGER = logging.getLogger(__name__) __all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] @@ -347,7 +344,7 @@ def _get_model(module_name: str, model_name: str): _UNSET = object() -class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object +class _MyMutableMapping(MutableMapping[str, typing.Any]): def __init__(self, data: typing.Dict[str, typing.Any]) -> None: self._data = data @@ -407,13 +404,13 @@ def get(self, key: str, default: typing.Any = None) -> typing.Any: return default @typing.overload - def pop(self, key: str) -> typing.Any: ... + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ @typing.overload - def pop(self, key: str, default: _T) -> _T: ... + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: ... + def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: """ @@ -443,7 +440,7 @@ def clear(self) -> None: """ self._data.clear() - def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ """ Updates D from mapping/iterable E and F. :param any args: Either a mapping object or an iterable of key-value pairs. @@ -454,7 +451,7 @@ def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: def setdefault(self, key: str, default: None = None) -> None: ... @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... 
# pylint: disable=signature-differs def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: """ @@ -644,7 +641,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) # pylint: disable=no-value-for-parameter + return super().__new__(cls) def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -680,7 +677,7 @@ def _deserialize(cls, data, exist_discriminators): discriminator_value = data.find(xml_name).text # pyright: ignore else: discriminator_value = data.get(discriminator._rest_name) - mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member return mapped_cls._deserialize(data, exist_discriminators) def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: diff --git a/sdk/batch/azure-batch/azure/batch/_serialization.py b/sdk/batch/azure-batch/azure/batch/_utils/serialization.py similarity index 98% rename from sdk/batch/azure-batch/azure/batch/_serialization.py rename to sdk/batch/azure-batch/azure/batch/_utils/serialization.py index 7a0232de5ddc..eb86ea23c965 100644 --- a/sdk/batch/azure-batch/azure/batch/_serialization.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/serialization.py @@ -1,28 +1,10 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- -# # Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pyright: reportUnnecessaryTypeIgnoreComment=false diff --git a/sdk/batch/azure-batch/azure/batch/_vendor.py b/sdk/batch/azure-batch/azure/batch/_utils/utils.py similarity index 84% rename from sdk/batch/azure-batch/azure/batch/_vendor.py rename to sdk/batch/azure-batch/azure/batch/_utils/utils.py index 396a0128421a..927adb7c8ae2 100644 --- a/sdk/batch/azure-batch/azure/batch/_vendor.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/utils.py @@ -6,23 +6,23 @@ # -------------------------------------------------------------------------- from abc import ABC -from typing import Optional, TYPE_CHECKING +from typing import Generic, Optional, TYPE_CHECKING, TypeVar from azure.core import MatchConditions -from ._configuration import BatchClientConfiguration - if TYPE_CHECKING: - from azure.core import PipelineClient + from .serialization import Deserializer, Serializer + - from ._serialization import Deserializer, Serializer +TClient = TypeVar("TClient") +TConfig = TypeVar("TConfig") -class BatchClientMixinABC(ABC): +class ClientMixinABC(ABC, Generic[TClient, TConfig]): """DO NOT use this class. It is for internal typing use only.""" - _client: "PipelineClient" - _config: BatchClientConfiguration + _client: TClient + _config: TConfig _serialize: "Serializer" _deserialize: "Deserializer" diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index 0068a9979d9d..be71c81bd282 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "15.0.0b2" +VERSION = "1.0.0b1" diff --git a/sdk/batch/azure-batch/azure/batch/aio/_client.py b/sdk/batch/azure-batch/azure/batch/aio/_client.py index 6a2133679d27..6a1093347c66 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_client.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_client.py @@ -14,7 +14,7 @@ from azure.core.pipeline import policies from azure.core.rest import AsyncHttpResponse, HttpRequest -from .._serialization import Deserializer, Serializer +from .._utils.serialization import Deserializer, Serializer from ._configuration import BatchClientConfiguration from ._operations import BatchClientOperationsMixin @@ -39,6 +39,7 @@ class BatchClient(BatchClientOperationsMixin): def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: _endpoint = "{endpoint}" self._config = BatchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + kwargs["request_id_header_name"] = "client-request-id" _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py index ddda2a1d449d..7fe39d06a2fb 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py @@ -6,13 +6,13 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime import json -import sys from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, List, Optional, TypeVar import urllib.parse -from azure.core import MatchConditions +from azure.core import AsyncPipelineClient, MatchConditions from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,7 +32,6 @@ from azure.core.utils import case_insensitive_dict from ... import models as _models -from ..._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._operations._operations import ( build_batch_cancel_certificate_deletion_request, build_batch_create_certificate_request, @@ -66,12 +65,12 @@ build_batch_get_job_schedule_request, build_batch_get_job_task_counts_request, build_batch_get_node_extension_request, - build_batch_get_node_file_properties_request, + build_batch_get_node_file_properties_internal_request, build_batch_get_node_file_request, build_batch_get_node_remote_login_settings_request, build_batch_get_node_request, build_batch_get_pool_request, - build_batch_get_task_file_properties_request, + build_batch_get_task_file_properties_internal_request, build_batch_get_task_file_request, build_batch_get_task_request, build_batch_job_schedule_exists_request, @@ -112,17 +111,17 @@ build_batch_update_pool_request, build_batch_upload_node_logs_request, ) -from .._vendor import BatchClientMixinABC +from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._utils.utils import ClientMixinABC +from .._configuration import BatchClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-many-public-methods +class BatchClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], BatchClientConfiguration] +): @distributed_trace def list_applications( @@ -211,7 +210,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -434,7 +433,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -460,7 +459,7 @@ async def get_next(next_link=None): @distributed_trace_async async def create_pool( self, - pool: _models.BatchPoolCreateContent, + pool: _models.BatchPoolCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -473,7 +472,7 @@ async def 
create_pool( to Microsoft Support engineers. :param pool: The Pool to be created. Required. - :type pool: ~azure.batch.models.BatchPoolCreateContent + :type pool: ~azure.batch.models.BatchPoolCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -638,7 +637,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPool], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -1001,7 +1000,7 @@ async def get_pool( async def update_pool( self, pool_id: str, - pool: _models.BatchPoolUpdateContent, + pool: _models.BatchPoolUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -1020,7 +1019,7 @@ async def update_pool( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param pool: The pool properties to update. Required. - :type pool: ~azure.batch.models.BatchPoolUpdateContent + :type pool: ~azure.batch.models.BatchPoolUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -1187,7 +1186,7 @@ async def disable_pool_auto_scale( async def enable_pool_auto_scale( self, pool_id: str, - content: _models.BatchPoolEnableAutoScaleContent, + enable_auto_scale_options: _models.BatchPoolAutoScaleEnableOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -1208,8 +1207,8 @@ async def enable_pool_auto_scale( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :param content: The options to use for enabling automatic scaling. Required. - :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent + :param enable_auto_scale_options: The options to use for enabling automatic scaling. Required. + :type enable_auto_scale_options: ~azure.batch.models.BatchPoolAutoScaleEnableOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -1259,7 +1258,7 @@ async def enable_pool_auto_scale( ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(enable_auto_scale_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_enable_pool_auto_scale_request( pool_id=pool_id, @@ -1306,7 +1305,7 @@ async def enable_pool_auto_scale( async def evaluate_pool_auto_scale( self, pool_id: str, - content: _models.BatchPoolEvaluateAutoScaleContent, + evaluate_auto_scale_options: _models.BatchPoolAutoScaleEvaluateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -1321,8 +1320,9 @@ async def evaluate_pool_auto_scale( :param pool_id: The ID of the Pool on which to evaluate the automatic scaling formula. Required. :type pool_id: str - :param content: The options to use for evaluating the automatic scaling formula. 
Required. - :type content: ~azure.batch.models.BatchPoolEvaluateAutoScaleContent + :param evaluate_auto_scale_options: The options to use for evaluating the automatic scaling + formula. Required. + :type evaluate_auto_scale_options: ~azure.batch.models.BatchPoolAutoScaleEvaluateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -1351,7 +1351,7 @@ async def evaluate_pool_auto_scale( ) cls: ClsType[_models.AutoScaleRun] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluate_auto_scale_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_evaluate_pool_auto_scale_request( pool_id=pool_id, @@ -1406,7 +1406,7 @@ async def evaluate_pool_auto_scale( async def resize_pool( self, pool_id: str, - content: _models.BatchPoolResizeContent, + resize_options: _models.BatchPoolResizeOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -1428,8 +1428,8 @@ async def resize_pool( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :param content: The options to use for resizing the pool. Required. - :type content: ~azure.batch.models.BatchPoolResizeContent + :param resize_options: The options to use for resizing the pool. Required. + :type resize_options: ~azure.batch.models.BatchPoolResizeOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -1479,7 +1479,7 @@ async def resize_pool( ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(resize_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_resize_pool_request( pool_id=pool_id, @@ -1636,7 +1636,7 @@ async def stop_pool_resize( async def replace_pool_properties( self, pool_id: str, - pool: _models.BatchPoolReplaceContent, + pool: _models.BatchPoolReplaceOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -1651,7 +1651,7 @@ async def replace_pool_properties( :param pool_id: The ID of the Pool to update. Required. :type pool_id: str :param pool: The options to use for replacing properties on the pool. Required. - :type pool: ~azure.batch.models.BatchPoolReplaceContent + :type pool: ~azure.batch.models.BatchPoolReplaceOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -1723,7 +1723,7 @@ async def replace_pool_properties( async def remove_nodes( self, pool_id: str, - content: _models.BatchNodeRemoveContent, + remove_options: _models.BatchNodeRemoveOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -1741,8 +1741,8 @@ async def remove_nodes( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :param content: The options to use for removing the node. Required. - :type content: ~azure.batch.models.BatchNodeRemoveContent + :param remove_options: The options to use for removing the node. Required. 
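Under the `*Content` to `*Options` rename, a resize call now takes `BatchPoolResizeOptions`. A hedged usage sketch (the endpoint placeholder, the credential wiring, and the `target_dedicated_nodes` field are assumptions drawn from the service's documented resize parameters, not from this diff):

    import asyncio

    from azure.batch import models
    from azure.batch.aio import BatchClient
    from azure.identity.aio import DefaultAzureCredential


    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with BatchClient("https://<account>.<region>.batch.azure.com", credential) as client:
                # Grow the pool to five dedicated nodes; field name assumed.
                resize_options = models.BatchPoolResizeOptions(target_dedicated_nodes=5)
                await client.resize_pool("my-pool", resize_options)


    asyncio.run(main())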
+ :type remove_options: ~azure.batch.models.BatchNodeRemoveOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -1792,7 +1792,7 @@ async def remove_nodes( ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(remove_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_remove_nodes_request( pool_id=pool_id, @@ -1925,7 +1925,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -2038,7 +2038,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -2301,7 +2301,7 @@ async def get_job( async def update_job( self, job_id: str, - job: _models.BatchJobUpdateContent, + job: _models.BatchJobUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -2320,7 +2320,7 @@ async def update_job( :param job_id: The ID of the Job whose properties you want to update. Required. :type job_id: str :param job: The options to use for updating the Job. Required. - :type job: ~azure.batch.models.BatchJobUpdateContent + :type job: ~azure.batch.models.BatchJobUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -2533,7 +2533,7 @@ async def replace_job( async def disable_job( self, job_id: str, - content: _models.BatchJobDisableContent, + disable_options: _models.BatchJobDisableOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -2556,8 +2556,8 @@ async def disable_job( :param job_id: The ID of the Job to disable. Required. :type job_id: str - :param content: The options to use for disabling the Job. Required. - :type content: ~azure.batch.models.BatchJobDisableContent + :param disable_options: The options to use for disabling the Job. Required. + :type disable_options: ~azure.batch.models.BatchJobDisableOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
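The same rename applies to job control. Disabling a job with the new options model might look like the following sketch (the `disable_tasks` field and the "requeue" value are assumptions based on the documented disable-job behavior):

    from azure.batch import models
    from azure.batch.aio import BatchClient


    async def disable_and_requeue(client: BatchClient, job_id: str) -> None:
        # "requeue" is assumed to return running tasks to the queue for rerun.
        disable_options = models.BatchJobDisableOptions(disable_tasks="requeue")
        await client.disable_job(job_id, disable_options)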
@@ -2607,7 +2607,7 @@ async def disable_job( ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(disable_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_disable_job_request( job_id=job_id, @@ -2763,7 +2763,7 @@ async def enable_job( async def terminate_job( self, job_id: str, - parameters: Optional[_models.BatchJobTerminateContent] = None, + options: Optional[_models.BatchJobTerminateOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -2785,8 +2785,8 @@ async def terminate_job( :param job_id: The ID of the Job to terminate. Required. :type job_id: str - :param parameters: The options to use for terminating the Job. Default value is None. - :type parameters: ~azure.batch.models.BatchJobTerminateContent + :param options: The options to use for terminating the Job. Default value is None. + :type options: ~azure.batch.models.BatchJobTerminateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -2839,8 +2839,8 @@ async def terminate_job( ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -2889,7 +2889,7 @@ async def terminate_job( @distributed_trace_async async def create_job( self, - job: _models.BatchJobCreateContent, + job: _models.BatchJobCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -2908,7 +2908,7 @@ async def create_job( engineers. :param job: The Job to be created. Required. - :type job: ~azure.batch.models.BatchJobCreateContent + :type job: ~azure.batch.models.BatchJobCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
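`terminate_job`'s body is optional: as the hunk above shows, a `None` options model means no request content is sent at all. A sketch of both call shapes (the `terminate_reason` field name is an assumption based on the service's terminate-reason parameter and may differ in the generated model):

    from azure.batch import models
    from azure.batch.aio import BatchClient


    async def terminate(client: BatchClient, job_id: str) -> None:
        # No options model: the request goes out with an empty body.
        await client.terminate_job(job_id)

        # With options: the model is serialized as the JSON request body.
        await client.terminate_job(
            job_id, models.BatchJobTerminateOptions(terminate_reason="UserTerminate")
        )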
@@ -3073,7 +3073,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3199,7 +3199,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3327,7 +3327,9 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3609,7 +3611,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3815,7 +3817,7 @@ async def get_certificate( ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any - ) -> _models.GetCertificateResponse: + ) -> _models.BatchCertificate: """Gets information about the specified Certificate. :param thumbprint_algorithm: The algorithm used to derive the thumbprint parameter. This must @@ -3833,8 +3835,8 @@ async def get_certificate( :paramtype ocpdate: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] - :return: GetCertificateResponse. The GetCertificateResponse is compatible with MutableMapping - :rtype: ~azure.batch.models.GetCertificateResponse + :return: BatchCertificate. 
The BatchCertificate is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchCertificate :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -3848,7 +3850,7 @@ async def get_certificate( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetCertificateResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCertificate] = kwargs.pop("cls", None) _request = build_batch_get_certificate_request( thumbprint_algorithm=thumbprint_algorithm, @@ -3891,7 +3893,7 @@ async def get_certificate( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetCertificateResponse, response.json()) + deserialized = _deserialize(_models.BatchCertificate, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -4238,7 +4240,7 @@ async def get_job_schedule( async def update_job_schedule( self, job_schedule_id: str, - job_schedule: _models.BatchJobScheduleUpdateContent, + job_schedule: _models.BatchJobScheduleUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4259,7 +4261,7 @@ async def update_job_schedule( :param job_schedule_id: The ID of the Job Schedule to update. Required. :type job_schedule_id: str :param job_schedule: The options to use for updating the Job Schedule. Required. - :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateContent + :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -4791,7 +4793,7 @@ async def terminate_job_schedule( @distributed_trace_async async def create_job_schedule( self, - job_schedule: _models.BatchJobScheduleCreateContent, + job_schedule: _models.BatchJobScheduleCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -4802,7 +4804,7 @@ async def create_job_schedule( Creates a Job Schedule to the specified Account. :param job_schedule: The Job Schedule to be created. Required. - :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateContent + :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -4967,7 +4969,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -4994,7 +4996,7 @@ async def get_next(next_link=None): async def create_task( self, job_id: str, - task: _models.BatchTaskCreateContent, + task: _models.BatchTaskCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -5009,7 +5011,7 @@ async def create_task( :param job_id: The ID of the Job to which the Task is to be created. Required. :type job_id: str :param task: The Task to be created. Required. 
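For task creation the renamed model is `BatchTaskCreateOptions`. A brief sketch (the `id` and `command_line` fields follow the service's documented task properties and should be treated as assumptions here):

    from azure.batch import models
    from azure.batch.aio import BatchClient


    async def add_hello_task(client: BatchClient, job_id: str) -> None:
        task = models.BatchTaskCreateOptions(
            id="hello-task",                           # must be unique within the job
            command_line="/bin/bash -c 'echo hello'",
        )
        await client.create_task(job_id, task)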
- :type task: ~azure.batch.models.BatchTaskCreateContent + :type task: ~azure.batch.models.BatchTaskCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -5181,7 +5183,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchTask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -5213,7 +5215,7 @@ async def create_task_collection( timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: + ) -> _models.BatchCreateTaskCollectionResult: """Adds a collection of Tasks to the specified Job. Note that each Task must have a unique ID. The Batch service may not return the @@ -5243,9 +5245,9 @@ async def create_task_collection( current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :return: BatchTaskAddCollectionResult. The BatchTaskAddCollectionResult is compatible with - MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult + :return: BatchCreateTaskCollectionResult. The BatchCreateTaskCollectionResult is compatible + with MutableMapping + :rtype: ~azure.batch.models.BatchCreateTaskCollectionResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -5262,7 +5264,7 @@ async def create_task_collection( content_type: str = kwargs.pop( "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") ) - cls: ClsType[_models.BatchTaskAddCollectionResult] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCreateTaskCollectionResult] = kwargs.pop("cls", None) _content = json.dumps(task_collection, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore @@ -5307,7 +5309,7 @@ async def create_task_collection( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.BatchTaskAddCollectionResult, response.json()) + deserialized = _deserialize(_models.BatchCreateTaskCollectionResult, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -5760,7 +5762,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -6213,8 +6215,6 @@ async def get_task_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace_async async def _get_task_file_properties_internal( self, @@ -6271,7 +6271,7 @@ async def _get_task_file_properties_internal( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_batch_get_task_file_properties_request( + _request = build_batch_get_task_file_properties_internal_request( job_id=job_id, task_id=task_id, 
file_path=file_path, @@ -6421,7 +6421,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -6449,7 +6449,7 @@ async def create_node_user( self, pool_id: str, node_id: str, - user: _models.BatchNodeUserCreateContent, + user: _models.BatchNodeUserCreateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -6465,7 +6465,7 @@ async def create_node_user( :param node_id: The ID of the machine on which you want to create a user Account. Required. :type node_id: str :param user: The options to use for creating the user. Required. - :type user: ~azure.batch.models.BatchNodeUserCreateContent + :type user: ~azure.batch.models.BatchNodeUserCreateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -6621,7 +6621,7 @@ async def replace_node_user( pool_id: str, node_id: str, user_name: str, - content: _models.BatchNodeUserUpdateContent, + update_options: _models.BatchNodeUserUpdateOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -6640,8 +6640,8 @@ async def replace_node_user( :type node_id: str :param user_name: The name of the user Account to update. Required. :type user_name: str - :param content: The options to use for updating the user. Required. - :type content: ~azure.batch.models.BatchNodeUserUpdateContent + :param update_options: The options to use for updating the user. Required. + :type update_options: ~azure.batch.models.BatchNodeUserUpdateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -6670,7 +6670,7 @@ async def replace_node_user( ) cls: ClsType[None] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(update_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_replace_node_user_request( pool_id=pool_id, @@ -6810,7 +6810,7 @@ async def reboot_node( self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeRebootContent] = None, + options: Optional[_models.BatchNodeRebootOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -6824,8 +6824,8 @@ async def reboot_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :param parameters: The options to use for rebooting the Compute Node. Default value is None. - :type parameters: ~azure.batch.models.BatchNodeRebootContent + :param options: The options to use for rebooting the Compute Node. Default value is None. + :type options: ~azure.batch.models.BatchNodeRebootOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
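`reboot_node` follows the same optional-options pattern. A sketch, with the `node_reboot_option` field and its "taskcompletion" value assumed from the documented reboot semantics:

    from azure.batch import models
    from azure.batch.aio import BatchClient


    async def restart_after_tasks(client: BatchClient, pool_id: str, node_id: str) -> None:
        # Assumed field: wait for running tasks to complete before rebooting.
        options = models.BatchNodeRebootOptions(node_reboot_option="taskcompletion")
        await client.reboot_node(pool_id, node_id, options)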
@@ -6854,8 +6854,8 @@ async def reboot_node( ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -6981,7 +6981,7 @@ async def reimage_node( self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeReimageContent] = None, + options: Optional[_models.BatchNodeReimageOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -6997,8 +6997,8 @@ async def reimage_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :param parameters: The options to use for reimaging the Compute Node. Default value is None. - :type parameters: ~azure.batch.models.BatchNodeReimageContent + :param options: The options to use for reimaging the Compute Node. Default value is None. + :type options: ~azure.batch.models.BatchNodeReimageOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -7027,8 +7027,8 @@ async def reimage_node( ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -7075,7 +7075,7 @@ async def deallocate_node( self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeDeallocateContent] = None, + options: Optional[_models.BatchNodeDeallocateOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -7089,8 +7089,8 @@ async def deallocate_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :param parameters: The options to use for deallocating the Compute Node. Default value is None. - :type parameters: ~azure.batch.models.BatchNodeDeallocateContent + :param options: The options to use for deallocating the Compute Node. Default value is None. + :type options: ~azure.batch.models.BatchNodeDeallocateOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -7119,8 +7119,8 @@ async def deallocate_node( ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -7167,7 +7167,7 @@ async def disable_node_scheduling( self, pool_id: str, node_id: str, - parameters: Optional[_models.BatchNodeDisableSchedulingContent] = None, + options: Optional[_models.BatchNodeDisableSchedulingOptions] = None, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -7183,9 +7183,9 @@ async def disable_node_scheduling( :param node_id: The ID of the Compute Node on which you want to disable Task scheduling. Required. 
:type node_id: str - :param parameters: The options to use for disabling scheduling on the Compute Node. Default - value is None. - :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent + :param options: The options to use for disabling scheduling on the Compute Node. Default value + is None. + :type options: ~azure.batch.models.BatchNodeDisableSchedulingOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. @@ -7214,8 +7214,8 @@ async def disable_node_scheduling( ) cls: ClsType[None] = kwargs.pop("cls", None) - if parameters is not None: - _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + if options is not None: + _content = json.dumps(options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore else: _content = None @@ -7436,7 +7436,7 @@ async def upload_node_logs( self, pool_id: str, node_id: str, - content: _models.UploadBatchServiceLogsContent, + upload_options: _models.UploadBatchServiceLogsOptions, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, @@ -7455,8 +7455,8 @@ async def upload_node_logs( :param node_id: The ID of the Compute Node for which you want to get the Remote Desktop Protocol file. Required. :type node_id: str - :param content: The Azure Batch service log files upload options. Required. - :type content: ~azure.batch.models.UploadBatchServiceLogsContent + :param upload_options: The Azure Batch service log files upload options. Required. + :type upload_options: ~azure.batch.models.UploadBatchServiceLogsOptions :keyword timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
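A sketch of the renamed log-upload call (the `container_url` and `start_time` fields and the `number_of_files_uploaded` result field follow the documented service shapes; the SAS URL placeholder is deliberately left unfilled):

    import datetime

    from azure.batch import models
    from azure.batch.aio import BatchClient


    async def upload_last_hour(client: BatchClient, pool_id: str, node_id: str) -> None:
        upload_options = models.UploadBatchServiceLogsOptions(
            container_url="https://<storage>.blob.core.windows.net/<container>?<sas>",
            start_time=datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(hours=1),
        )
        result = await client.upload_node_logs(pool_id, node_id, upload_options)
        print(result.number_of_files_uploaded)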
@@ -7486,7 +7486,7 @@ async def upload_node_logs( ) cls: ClsType[_models.UploadBatchServiceLogsResult] = kwargs.pop("cls", None) - _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(upload_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_upload_node_logs_request( pool_id=pool_id, @@ -7635,7 +7635,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNode], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -7852,7 +7852,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -8080,8 +8080,6 @@ async def get_node_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace_async async def _get_node_file_properties_internal( self, @@ -8138,7 +8136,7 @@ async def _get_node_file_properties_internal( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_batch_get_node_file_properties_request( + _request = build_batch_get_node_file_properties_internal_request( pool_id=pool_id, node_id=node_id, file_path=file_path, @@ -8286,7 +8284,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py index b3c12ac94cfb..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py @@ -1,523 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import datetime -import collections -import logging -from typing import Any, Deque, AsyncIterator, List, Iterable, Optional, Union +from typing import List -from azure.batch import models as _models -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - # create_task_collection renamed - @distributed_trace - async def create_tasks( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrency: number of coroutines to use in parallel when adding tasks. If specified - and greater than 0, will start additional coroutines to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrency: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. 
Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) - - results_queue: Deque[_models.BatchTaskAddResult] = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - self, job_id=job_id, task_collection=task_collection, **kwargs - ) - - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - coroutines = [] - for i in range(concurrencies): - coroutines.append(task_workflow_manager.task_collection_handler(results_queue)) - await asyncio.gather(*coroutines) - else: - await task_workflow_manager.task_collection_handler(results_queue) - - # Only define error if all coroutines have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - @distributed_trace - async def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
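Because the helper above forces `stream=True`, callers get an async iterator of byte chunks rather than a buffered body. A consumption sketch (the file path is illustrative):

    from azure.batch.aio import BatchClient


    async def download_stdout(client: BatchClient, pool_id: str, node_id: str) -> bytes:
        chunks = []
        stream = await client.get_node_file(pool_id, node_id, "startup/stdout.txt")
        async for chunk in stream:  # chunks arrive as raw bytes
            chunks.append(chunk)
        return b"".join(chunks)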
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_node_file(*args, **kwargs) - - @distributed_trace - async def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_node_file_properties_internal( # type: ignore - pool_id, - node_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - async def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
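The `cls` lambda above is the generated-code hook for shaping results: it receives the pipeline response, the deserialized body, and the response headers, and whatever it returns becomes the operation's return value. Here it lifts a HEAD-style header payload into a model. A standalone sketch of that callback shape (all names illustrative):

    from dataclasses import dataclass
    from typing import Any, Mapping


    @dataclass
    class FileProperties:
        content_length: int
        last_modified: str


    def properties_from_headers(
        pipeline_response: Any, deserialized: Any, headers: Mapping[str, str]
    ) -> FileProperties:
        # The body is ignored; the interesting payload rides in the headers.
        return FileProperties(
            content_length=int(headers["Content-Length"]),
            last_modified=headers["Last-Modified"],
        )


    print(properties_from_headers(None, None, {"Content-Length": "42", "Last-Modified": "Mon, 01 Jan 2024 00:00:00 GMT"}))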
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_task_file_properties_internal( # type: ignore - job_id, - task_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - async def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_task_file(*args, **kwargs) - - -class _TaskWorkflowManager: - """Worker class for one create_task_collection request - - :param str job_id: The ID of the job to which the task collection is to be - added. - :ivar tasks_to_add: The collection of tasks to add. - :vartype tasks_to_add: Iterable[~azure.batch.models.BatchTaskCreateContent] - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`TaskAddCollectionOptions - ` - """ - - def __init__( - self, - batch_client: BatchClientOperationsMixin, - job_id: str, - task_collection: Iterable[_models.BatchTaskCreateContent], - **kwargs - ): - # List of tasks which failed to add due to a returned client error - self.failure_tasks: Deque[_models.BatchTaskAddResult] = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors: Deque[Any] = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - - # Variables to be used for task create_task_collection requests - self._batch_client = batch_client - self._job_id = job_id - - self._kwargs = kwargs - - async def _bulk_add_tasks( - self, - results_queue: collections.deque, - chunk_tasks_to_add: List[_models.BatchTaskCreateContent], - ): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
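The retry strategy described above, reduced to a runnable toy: on an oversized chunk, halve it, push the back half onto the pending queue, retry the front half, and remember the smaller chunk size. `submit` stands in for the real `create_task_collection` call, and the iterative loop simplifies the recursive retry in the removed code:

    import collections
    from typing import Deque, List


    class TooLarge(Exception):
        pass


    def submit(chunk: List[str]) -> None:
        # Toy limit: pretend any chunk over 2 items exceeds the body-size cap.
        if len(chunk) > 2:
            raise TooLarge()


    def add_all(tasks: Deque[str], max_per_request: int = 100) -> int:
        submitted = 0
        while tasks:
            chunk = [tasks.pop() for _ in range(min(max_per_request, len(tasks)))]
            try:
                submit(chunk)
                submitted += len(chunk)
            except TooLarge:
                midpoint = max(1, len(chunk) // 2)
                max_per_request = min(max_per_request, midpoint)
                tasks.extendleft(chunk[midpoint:])  # back half returns to the queue
                tasks.extend(chunk[:midpoint])      # front half is retried next
        return submitted


    print(add_all(collections.deque(str(i) for i in range(10))))  # 10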
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :ivar chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :vartype chunk_tasks_to_add: list[~azure.batch.models.BatchTaskCreateContent] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - await self._batch_client.create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskGroup(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another coroutines . 
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: # type: ignore - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - if create_task_collection_response.value: - for task_result in create_task_collection_response.value: - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not (task_result.error and task_result.error.code == "TaskExists") - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - async def task_collection_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -527,20 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/aio/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_patch.py index 64a3f1262c22..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_patch.py @@ -1,55 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import List -from ._client import BatchClient as GenerateBatchClient -from .._patch import BatchSharedKeyAuthPolicy -from azure.core.credentials import TokenCredential - - -from azure.core.credentials import AzureNamedKeyCredential - - -from typing import Union - -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Web PubSub service instance. - :type endpoint: str - :param hub: Target hub name, which should start with alphabetic characters and only contain - alpha-numeric characters or underscore. - :type hub: str - :param credentials: Credential needed for the client to connect to Azure. - :type credentials: ~azure.identity.ClientSecretCredential, ~azure.core.credentials.AzureNamedKeyCredential, - or ~azure.identity.TokenCredentials - :keyword api_version: Api Version. The default value is "2021-10-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: Union[AzureNamedKeyCredential, TokenCredential], **kwargs): - super().__init__( - endpoint=endpoint, - credential=credential, # type: ignore - authentication_policy=kwargs.pop("authentication_policy", self._format_shared_key_credential(credential)), - **kwargs - ) - - def _format_shared_key_credential(self, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py b/sdk/batch/azure-batch/azure/batch/aio/_vendor.py deleted file mode 100644 index baee6ee7264c..000000000000 --- a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py +++ /dev/null @@ -1,57 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from abc import ABC -from typing import Optional, TYPE_CHECKING - -from azure.core import MatchConditions - -from ._configuration import BatchClientConfiguration - -if TYPE_CHECKING: - from azure.core import AsyncPipelineClient - - from .._serialization import Deserializer, Serializer - - -class BatchClientMixinABC(ABC): - """DO NOT use this class. 
It is for internal typing use only.""" - - _client: "AsyncPipelineClient" - _config: BatchClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" - - -def quote_etag(etag: Optional[str]) -> Optional[str]: - if not etag or etag == "*": - return etag - if etag.startswith("W/"): - return etag - if etag.startswith('"') and etag.endswith('"'): - return etag - if etag.startswith("'") and etag.endswith("'"): - return etag - return '"' + etag + '"' - - -def prep_if_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: - if match_condition == MatchConditions.IfNotModified: - if_match = quote_etag(etag) if etag else None - return if_match - if match_condition == MatchConditions.IfPresent: - return "*" - return None - - -def prep_if_none_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: - if match_condition == MatchConditions.IfModified: - if_none_match = quote_etag(etag) if etag else None - return if_none_match - if match_condition == MatchConditions.IfMissing: - return "*" - return None diff --git a/sdk/batch/azure-batch/azure/batch/models/__init__.py b/sdk/batch/azure-batch/azure/batch/models/__init__.py index c54aed6d5845..54ace408770f 100644 --- a/sdk/batch/azure-batch/azure/batch/models/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/models/__init__.py @@ -14,7 +14,6 @@ from ._models import ( # type: ignore - AffinityInfo, AuthenticationTokenSettings, AutoScaleRun, AutoScaleRunError, @@ -22,18 +21,24 @@ AutomaticOsUpgradePolicy, AzureBlobFileSystemConfiguration, AzureFileShareConfiguration, + BatchAffinityInfo, BatchApplication, BatchApplicationPackageReference, BatchAutoPoolSpecification, BatchCertificate, + BatchCertificateDeleteError, BatchCertificateReference, + BatchContainerConfiguration, + BatchCreateTaskCollectionResult, + BatchDiffDiskSettings, BatchError, BatchErrorDetail, BatchErrorMessage, + BatchInboundNatPool, BatchJob, BatchJobConstraints, - BatchJobCreateContent, - BatchJobDisableContent, + BatchJobCreateOptions, + BatchJobDisableOptions, BatchJobExecutionInfo, BatchJobManagerTask, BatchJobNetworkConfiguration, @@ -44,62 +49,64 @@ BatchJobReleaseTaskExecutionInfo, BatchJobSchedule, BatchJobScheduleConfiguration, - BatchJobScheduleCreateContent, + BatchJobScheduleCreateOptions, BatchJobScheduleExecutionInfo, BatchJobScheduleStatistics, - BatchJobScheduleUpdateContent, + BatchJobScheduleUpdateOptions, BatchJobSchedulingError, BatchJobSpecification, BatchJobStatistics, - BatchJobTerminateContent, - BatchJobUpdateContent, + BatchJobTerminateOptions, + BatchJobUpdateOptions, + BatchMetadataItem, BatchNode, BatchNodeAgentInfo, BatchNodeCounts, - BatchNodeDeallocateContent, - BatchNodeDisableSchedulingContent, + BatchNodeDeallocateOptions, + BatchNodeDisableSchedulingOptions, BatchNodeEndpointConfiguration, BatchNodeError, BatchNodeFile, BatchNodeIdentityReference, BatchNodeInfo, BatchNodePlacementConfiguration, - BatchNodeRebootContent, - BatchNodeReimageContent, + BatchNodeRebootOptions, + BatchNodeReimageOptions, BatchNodeRemoteLoginSettings, - BatchNodeRemoveContent, - BatchNodeUserCreateContent, - BatchNodeUserUpdateContent, + BatchNodeRemoveOptions, + BatchNodeUserCreateOptions, + BatchNodeUserUpdateOptions, BatchNodeVMExtension, + BatchOsDisk, BatchPool, - BatchPoolCreateContent, - BatchPoolEnableAutoScaleContent, + BatchPoolAutoScaleEnableOptions, + BatchPoolAutoScaleEvaluateOptions, + BatchPoolCreateOptions, BatchPoolEndpointConfiguration, - BatchPoolEvaluateAutoScaleContent, 
BatchPoolIdentity, BatchPoolInfo, BatchPoolNodeCounts, - BatchPoolReplaceContent, - BatchPoolResizeContent, + BatchPoolReplaceOptions, + BatchPoolResizeOptions, BatchPoolResourceStatistics, BatchPoolSpecification, BatchPoolStatistics, - BatchPoolUpdateContent, + BatchPoolUpdateOptions, BatchPoolUsageMetrics, BatchPoolUsageStatistics, + BatchPublicIpAddressConfiguration, BatchStartTask, BatchStartTaskInfo, BatchSubtask, BatchSupportedImage, BatchTask, - BatchTaskAddCollectionResult, - BatchTaskAddResult, BatchTaskConstraints, BatchTaskContainerExecutionInfo, BatchTaskContainerSettings, BatchTaskCounts, BatchTaskCountsResult, - BatchTaskCreateContent, + BatchTaskCreateOptions, + BatchTaskCreateResult, BatchTaskDependencies, BatchTaskExecutionInfo, BatchTaskFailureInfo, @@ -109,13 +116,13 @@ BatchTaskSchedulingPolicy, BatchTaskSlotCounts, BatchTaskStatistics, + BatchUefiSettings, + BatchUserAssignedIdentity, + BatchVmImageReference, CifsMountConfiguration, - ContainerConfiguration, ContainerHostBatchBindMountEntry, ContainerRegistryReference, DataDisk, - DeleteBatchCertificateError, - DiffDiskSettings, DiskEncryptionConfiguration, EnvironmentSetting, ExitCodeMapping, @@ -123,58 +130,52 @@ ExitConditions, ExitOptions, FileProperties, - GetCertificateResponse, - HttpHeader, - ImageReference, InboundEndpoint, - InboundNatPool, InstanceViewStatus, LinuxUserConfiguration, ManagedDisk, - MetadataItem, MountConfiguration, MultiInstanceSettings, NameValuePair, NetworkConfiguration, NetworkSecurityGroupRule, NfsMountConfiguration, - OSDisk, OutputFile, OutputFileBlobContainerDestination, OutputFileDestination, OutputFileUploadConfig, - PublicIpAddressConfiguration, + OutputFileUploadHeader, RecentBatchJob, ResizeError, ResourceFile, RollingUpgradePolicy, SecurityProfile, ServiceArtifactReference, - UefiSettings, UpgradePolicy, - UploadBatchServiceLogsContent, + UploadBatchServiceLogsOptions, UploadBatchServiceLogsResult, UserAccount, - UserAssignedIdentity, UserIdentity, - VMDiskSecurityProfile, VMExtension, VMExtensionInstanceView, VirtualMachineConfiguration, VirtualMachineInfo, + VmDiskSecurityProfile, WindowsConfiguration, WindowsUserConfiguration, ) from ._enums import ( # type: ignore - AccessScope, AllocationState, AutoUserScope, + BatchAccessScope, + BatchAllTasksCompleteMode, BatchCertificateFormat, BatchCertificateState, BatchCertificateStoreLocation, BatchCertificateVisibility, - BatchJobAction, + BatchErrorSourceCategory, + BatchJobActionKind, BatchJobPreparationTaskState, BatchJobReleaseTaskState, BatchJobScheduleState, @@ -185,7 +186,7 @@ BatchNodeDisableSchedulingOption, BatchNodeFillType, BatchNodePlacementPolicyType, - BatchNodeRebootOption, + BatchNodeRebootKind, BatchNodeReimageOption, BatchNodeState, BatchPoolIdentityType, @@ -195,6 +196,7 @@ BatchSubtaskState, BatchTaskAddStatus, BatchTaskExecutionResult, + BatchTaskFailureMode, BatchTaskState, CachingType, ContainerHostDataPath, @@ -206,15 +208,12 @@ DiskEncryptionTarget, DynamicVNetAssignmentScope, ElevationLevel, - ErrorCategory, ImageVerificationType, InboundEndpointProtocol, IpAddressProvisioningType, LoginMode, NetworkSecurityGroupRuleAccess, OSType, - OnAllBatchTasksComplete, - OnBatchTaskFailure, OutputFileUploadCondition, SchedulingState, SecurityEncryptionTypes, @@ -228,7 +227,6 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "AffinityInfo", "AuthenticationTokenSettings", "AutoScaleRun", "AutoScaleRunError", @@ -236,18 +234,24 @@ "AutomaticOsUpgradePolicy", "AzureBlobFileSystemConfiguration", 
"AzureFileShareConfiguration", + "BatchAffinityInfo", "BatchApplication", "BatchApplicationPackageReference", "BatchAutoPoolSpecification", "BatchCertificate", + "BatchCertificateDeleteError", "BatchCertificateReference", + "BatchContainerConfiguration", + "BatchCreateTaskCollectionResult", + "BatchDiffDiskSettings", "BatchError", "BatchErrorDetail", "BatchErrorMessage", + "BatchInboundNatPool", "BatchJob", "BatchJobConstraints", - "BatchJobCreateContent", - "BatchJobDisableContent", + "BatchJobCreateOptions", + "BatchJobDisableOptions", "BatchJobExecutionInfo", "BatchJobManagerTask", "BatchJobNetworkConfiguration", @@ -258,62 +262,64 @@ "BatchJobReleaseTaskExecutionInfo", "BatchJobSchedule", "BatchJobScheduleConfiguration", - "BatchJobScheduleCreateContent", + "BatchJobScheduleCreateOptions", "BatchJobScheduleExecutionInfo", "BatchJobScheduleStatistics", - "BatchJobScheduleUpdateContent", + "BatchJobScheduleUpdateOptions", "BatchJobSchedulingError", "BatchJobSpecification", "BatchJobStatistics", - "BatchJobTerminateContent", - "BatchJobUpdateContent", + "BatchJobTerminateOptions", + "BatchJobUpdateOptions", + "BatchMetadataItem", "BatchNode", "BatchNodeAgentInfo", "BatchNodeCounts", - "BatchNodeDeallocateContent", - "BatchNodeDisableSchedulingContent", + "BatchNodeDeallocateOptions", + "BatchNodeDisableSchedulingOptions", "BatchNodeEndpointConfiguration", "BatchNodeError", "BatchNodeFile", "BatchNodeIdentityReference", "BatchNodeInfo", "BatchNodePlacementConfiguration", - "BatchNodeRebootContent", - "BatchNodeReimageContent", + "BatchNodeRebootOptions", + "BatchNodeReimageOptions", "BatchNodeRemoteLoginSettings", - "BatchNodeRemoveContent", - "BatchNodeUserCreateContent", - "BatchNodeUserUpdateContent", + "BatchNodeRemoveOptions", + "BatchNodeUserCreateOptions", + "BatchNodeUserUpdateOptions", "BatchNodeVMExtension", + "BatchOsDisk", "BatchPool", - "BatchPoolCreateContent", - "BatchPoolEnableAutoScaleContent", + "BatchPoolAutoScaleEnableOptions", + "BatchPoolAutoScaleEvaluateOptions", + "BatchPoolCreateOptions", "BatchPoolEndpointConfiguration", - "BatchPoolEvaluateAutoScaleContent", "BatchPoolIdentity", "BatchPoolInfo", "BatchPoolNodeCounts", - "BatchPoolReplaceContent", - "BatchPoolResizeContent", + "BatchPoolReplaceOptions", + "BatchPoolResizeOptions", "BatchPoolResourceStatistics", "BatchPoolSpecification", "BatchPoolStatistics", - "BatchPoolUpdateContent", + "BatchPoolUpdateOptions", "BatchPoolUsageMetrics", "BatchPoolUsageStatistics", + "BatchPublicIpAddressConfiguration", "BatchStartTask", "BatchStartTaskInfo", "BatchSubtask", "BatchSupportedImage", "BatchTask", - "BatchTaskAddCollectionResult", - "BatchTaskAddResult", "BatchTaskConstraints", "BatchTaskContainerExecutionInfo", "BatchTaskContainerSettings", "BatchTaskCounts", "BatchTaskCountsResult", - "BatchTaskCreateContent", + "BatchTaskCreateOptions", + "BatchTaskCreateResult", "BatchTaskDependencies", "BatchTaskExecutionInfo", "BatchTaskFailureInfo", @@ -323,13 +329,13 @@ "BatchTaskSchedulingPolicy", "BatchTaskSlotCounts", "BatchTaskStatistics", + "BatchUefiSettings", + "BatchUserAssignedIdentity", + "BatchVmImageReference", "CifsMountConfiguration", - "ContainerConfiguration", "ContainerHostBatchBindMountEntry", "ContainerRegistryReference", "DataDisk", - "DeleteBatchCertificateError", - "DiffDiskSettings", "DiskEncryptionConfiguration", "EnvironmentSetting", "ExitCodeMapping", @@ -337,55 +343,49 @@ "ExitConditions", "ExitOptions", "FileProperties", - "GetCertificateResponse", - "HttpHeader", - "ImageReference", 
"InboundEndpoint", - "InboundNatPool", "InstanceViewStatus", "LinuxUserConfiguration", "ManagedDisk", - "MetadataItem", "MountConfiguration", "MultiInstanceSettings", "NameValuePair", "NetworkConfiguration", "NetworkSecurityGroupRule", "NfsMountConfiguration", - "OSDisk", "OutputFile", "OutputFileBlobContainerDestination", "OutputFileDestination", "OutputFileUploadConfig", - "PublicIpAddressConfiguration", + "OutputFileUploadHeader", "RecentBatchJob", "ResizeError", "ResourceFile", "RollingUpgradePolicy", "SecurityProfile", "ServiceArtifactReference", - "UefiSettings", "UpgradePolicy", - "UploadBatchServiceLogsContent", + "UploadBatchServiceLogsOptions", "UploadBatchServiceLogsResult", "UserAccount", - "UserAssignedIdentity", "UserIdentity", - "VMDiskSecurityProfile", "VMExtension", "VMExtensionInstanceView", "VirtualMachineConfiguration", "VirtualMachineInfo", + "VmDiskSecurityProfile", "WindowsConfiguration", "WindowsUserConfiguration", - "AccessScope", "AllocationState", "AutoUserScope", + "BatchAccessScope", + "BatchAllTasksCompleteMode", "BatchCertificateFormat", "BatchCertificateState", "BatchCertificateStoreLocation", "BatchCertificateVisibility", - "BatchJobAction", + "BatchErrorSourceCategory", + "BatchJobActionKind", "BatchJobPreparationTaskState", "BatchJobReleaseTaskState", "BatchJobScheduleState", @@ -396,7 +396,7 @@ "BatchNodeDisableSchedulingOption", "BatchNodeFillType", "BatchNodePlacementPolicyType", - "BatchNodeRebootOption", + "BatchNodeRebootKind", "BatchNodeReimageOption", "BatchNodeState", "BatchPoolIdentityType", @@ -406,6 +406,7 @@ "BatchSubtaskState", "BatchTaskAddStatus", "BatchTaskExecutionResult", + "BatchTaskFailureMode", "BatchTaskState", "CachingType", "ContainerHostDataPath", @@ -417,15 +418,12 @@ "DiskEncryptionTarget", "DynamicVNetAssignmentScope", "ElevationLevel", - "ErrorCategory", "ImageVerificationType", "InboundEndpointProtocol", "IpAddressProvisioningType", "LoginMode", "NetworkSecurityGroupRuleAccess", "OSType", - "OnAllBatchTasksComplete", - "OnBatchTaskFailure", "OutputFileUploadCondition", "SchedulingState", "SecurityEncryptionTypes", diff --git a/sdk/batch/azure-batch/azure/batch/models/_enums.py b/sdk/batch/azure-batch/azure/batch/models/_enums.py index c6adc7cc362d..5aa74e9b2cc6 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_enums.py +++ b/sdk/batch/azure-batch/azure/batch/models/_enums.py @@ -10,13 +10,6 @@ from azure.core import CaseInsensitiveEnumMeta -class AccessScope(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """AccessScope enums.""" - - JOB = "job" - """Grants access to perform all operations on the Job containing the Task.""" - - class AllocationState(str, Enum, metaclass=CaseInsensitiveEnumMeta): """AllocationState enums.""" @@ -41,6 +34,22 @@ class AutoUserScope(str, Enum, metaclass=CaseInsensitiveEnumMeta): Node in a Pool.""" +class BatchAccessScope(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchAccessScope enums.""" + + JOB = "job" + """Grants access to perform all operations on the Job containing the Task.""" + + +class BatchAllTasksCompleteMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The action the Batch service should take when all Tasks in the Job are in the completed state.""" + + NO_ACTION = "noaction" + """Do nothing. The Job remains active unless terminated or disabled by some other means.""" + TERMINATE_JOB = "terminatejob" + """Terminate the Job. 
The Job's terminationReason is set to 'AllTasksComplete'."""
+
+
 class BatchCertificateFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """BatchCertificateFormat enums."""
 
@@ -90,7 +99,16 @@ class BatchCertificateVisibility(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     Compute Node."""
 
 
-class BatchJobAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+class BatchErrorSourceCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BatchErrorSourceCategory enums."""
+
+    USER_ERROR = "usererror"
+    """The error is due to a user issue, such as misconfiguration."""
+    SERVER_ERROR = "servererror"
+    """The error is due to an internal server issue."""
+
+
+class BatchJobActionKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """BatchJobAction enums."""
 
     NONE = "none"
@@ -256,8 +274,8 @@ class BatchNodePlacementPolicyType(str, Enum, metaclass=CaseInsensitiveEnumMeta)
     balancing."""
 
 
-class BatchNodeRebootOption(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """BatchNodeRebootOption enums."""
+class BatchNodeRebootKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BatchNodeRebootKind enums."""
 
     REQUEUE = "requeue"
     """Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute
@@ -421,6 +439,15 @@ class BatchTaskExecutionResult(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     process was launched, while the Task process was executing, or after the Task process exited."""
 
 
+class BatchTaskFailureMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BatchTaskFailureMode enums."""
+
+    NO_ACTION = "noaction"
+    """Do nothing. The Job remains active unless terminated or disabled by some other means."""
+    PERFORM_EXIT_OPTIONS_JOB_ACTION = "performexitoptionsjobaction"
+    """Take the action associated with the Task exit condition in the Task's exitConditions collection."""
+
+
 class BatchTaskState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """BatchTaskState enums."""
 
@@ -558,15 +585,6 @@ class ElevationLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """The user is a user with elevated access and operates with full Administrator permissions."""
 
 
-class ErrorCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """ErrorCategory enums."""
-
-    USER_ERROR = "usererror"
-    """The error is due to a user issue, such as misconfiguration."""
-    SERVER_ERROR = "servererror"
-    """The error is due to an internal server issue."""
-
-
 class ImageVerificationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """ImageVerificationType enums."""
 
@@ -622,24 +640,6 @@ class NetworkSecurityGroupRuleAccess(str, Enum, metaclass=CaseInsensitiveEnumMet
     """Deny access."""
 
 
-class OnAllBatchTasksComplete(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """The action the Batch service should take when all Tasks in the Job are in the completed state."""
-
-    NO_ACTION = "noaction"
-    """Do nothing. The Job remains active unless terminated or disabled by some other means."""
-    TERMINATE_JOB = "terminatejob"
-    """Terminate the Job. The Job's terminationReason is set to 'AllTasksComplete'."""
-
-
-class OnBatchTaskFailure(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """OnTaskFailure enums."""
-
-    NO_ACTION = "noaction"
-    """Do nothing. The Job remains active unless terminated or disabled by some other means."""
-    PERFORM_EXIT_OPTIONS_JOB_ACTION = "performexitoptionsjobaction"
-    """Terminate the Job. 
The Job's terminationReason is set to 'AllTasksComplete'.""" - - class OSType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """OSType enums.""" diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index 842decf5de5c..8d3edc221af6 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -11,50 +11,13 @@ import datetime from typing import Any, Dict, List, Mapping, Optional, TYPE_CHECKING, Union, overload -from .. import _model_base -from .._model_base import rest_field +from .._utils.model_base import Model as _Model, rest_field if TYPE_CHECKING: from .. import models as _models -class AffinityInfo(_model_base.Model): - """A locality hint that can be used by the Batch service to select a Compute Node - on which to start a Task. - - :ivar affinity_id: An opaque string representing the location of a Compute Node or a Task that - has run previously. You can pass the affinityId of a Node to indicate that this Task needs to - run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is - busy or unavailable at the time the Task is scheduled, then the Task will be scheduled - elsewhere. Required. - :vartype affinity_id: str - """ - - affinity_id: str = rest_field(name="affinityId", visibility=["read", "create", "update", "delete", "query"]) - """An opaque string representing the location of a Compute Node or a Task that has run previously. - You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute - Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable - at the time the Task is scheduled, then the Task will be scheduled elsewhere. Required.""" - - @overload - def __init__( - self, - *, - affinity_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class AuthenticationTokenSettings(_model_base.Model): +class AuthenticationTokenSettings(_Model): """The settings for an authentication token that the Task can use to perform Batch service operations. @@ -62,10 +25,10 @@ class AuthenticationTokenSettings(_model_base.Model): grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. - :vartype access: list[str or ~azure.batch.models.AccessScope] + :vartype access: list[str or ~azure.batch.models.BatchAccessScope] """ - access: Optional[List[Union[str, "_models.AccessScope"]]] = rest_field( + access: Optional[List[Union[str, "_models.BatchAccessScope"]]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The Batch resources to which the token grants access. The authentication token grants access to @@ -77,7 +40,7 @@ class AuthenticationTokenSettings(_model_base.Model): def __init__( self, *, - access: Optional[List[Union[str, "_models.AccessScope"]]] = None, + access: Optional[List[Union[str, "_models.BatchAccessScope"]]] = None, ) -> None: ... 
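The keyword overload above and the mapping overload that follows it are the constructor pattern every generated model in this patch shares: a model can be built from keyword arguments using the Python-level field names, or from a raw JSON mapping keyed by the REST wire names. A minimal sketch of the two forms, assuming a build of azure-batch that includes these changes:

from azure.batch import models as _models

# Keyword form: Python-level attribute names.
settings = _models.AuthenticationTokenSettings(access=["job"])

# Mapping form: raw JSON wire names, e.g. when replaying a service payload.
settings_from_json = _models.AuthenticationTokenSettings({"access": ["job"]})

# Both expose the same attribute view of the data.
print(settings.access, settings_from_json.access)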
@overload @@ -91,7 +54,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutomaticOsUpgradePolicy(_model_base.Model): +class AutomaticOsUpgradePolicy(_Model): """The configuration parameters used for performing automatic OS upgrade. :ivar disable_automatic_rollback: Whether OS image rollback feature should be disabled. @@ -154,7 +117,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoScaleRun(_model_base.Model): +class AutoScaleRun(_Model): """The results and errors from an execution of a Pool autoscale formula. :ivar timestamp: The time at which the autoscale formula was last evaluated. Required. @@ -202,7 +165,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoScaleRunError(_model_base.Model): +class AutoScaleRunError(_Model): """An error that occurred when executing or evaluating a Pool autoscale formula. :ivar code: An identifier for the autoscale error. Codes are invariant and are intended to be @@ -246,7 +209,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoUserSpecification(_model_base.Model): +class AutoUserSpecification(_Model): """Specifies the options for the auto user that runs an Azure Batch Task. :ivar scope: The scope for the auto user. The default value is pool. If the pool is running @@ -293,7 +256,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureBlobFileSystemConfiguration(_model_base.Model): +class AzureBlobFileSystemConfiguration(_Model): """Information used to connect to an Azure Storage Container using Blobfuse. :ivar account_name: The Azure Storage Account name. Required. @@ -372,16 +335,16 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureFileShareConfiguration(_model_base.Model): +class AzureFileShareConfiguration(_Model): """Information used to connect to an Azure Fileshare. :ivar account_name: The Azure Storage account name. Required. :vartype account_name: str + :ivar account_key: The Azure Storage account key. Required. + :vartype account_key: str :ivar azure_file_url: The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. Required. :vartype azure_file_url: str - :ivar account_key: The Azure Storage account key. Required. - :vartype account_key: str :ivar relative_mount_path: The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. @@ -393,10 +356,10 @@ class AzureFileShareConfiguration(_model_base.Model): account_name: str = rest_field(name="accountName", visibility=["read", "create", "update", "delete", "query"]) """The Azure Storage account name. Required.""" - azure_file_url: str = rest_field(name="azureFileUrl", visibility=["read", "create", "update", "delete", "query"]) - """The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. Required.""" account_key: str = rest_field(name="accountKey", visibility=["read", "create", "update", "delete", "query"]) """The Azure Storage account key. Required.""" + azure_file_url: str = rest_field(name="azureFileUrl", visibility=["read", "create", "update", "delete", "query"]) + """The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. 
Required.""" relative_mount_path: str = rest_field( name="relativeMountPath", visibility=["read", "create", "update", "delete", "query"] ) @@ -414,8 +377,8 @@ def __init__( self, *, account_name: str, - azure_file_url: str, account_key: str, + azure_file_url: str, relative_mount_path: str, mount_options: Optional[str] = None, ) -> None: ... @@ -431,7 +394,43 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchApplication(_model_base.Model): +class BatchAffinityInfo(_Model): + """A locality hint that can be used by the Batch service to select a Compute Node + on which to start a Task. + + :ivar affinity_id: An opaque string representing the location of a Compute Node or a Task that + has run previously. You can pass the affinityId of a Node to indicate that this Task needs to + run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is + busy or unavailable at the time the Task is scheduled, then the Task will be scheduled + elsewhere. Required. + :vartype affinity_id: str + """ + + affinity_id: str = rest_field(name="affinityId", visibility=["read", "create", "update", "delete", "query"]) + """An opaque string representing the location of a Compute Node or a Task that has run previously. + You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute + Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable + at the time the Task is scheduled, then the Task will be scheduled elsewhere. Required.""" + + @overload + def __init__( + self, + *, + affinity_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchApplication(_Model): """Contains information about an application in an Azure Batch Account. :ivar id: A string that uniquely identifies the application within the Account. Required. @@ -469,7 +468,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchApplicationPackageReference(_model_base.Model): +class BatchApplicationPackageReference(_Model): """A reference to an Package to be deployed to Compute Nodes. :ivar application_id: The ID of the application to deploy. When creating a pool, the package's @@ -516,7 +515,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchAutoPoolSpecification(_model_base.Model): +class BatchAutoPoolSpecification(_Model): """Specifies characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. @@ -583,7 +582,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchCertificate(_model_base.Model): +class BatchCertificate(_Model): """A Certificate that can be installed on Compute Nodes and can be used to authenticate operations on the machine. @@ -611,9 +610,9 @@ class BatchCertificate(_model_base.Model): :vartype public_data: str :ivar delete_certificate_error: The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. 
- :vartype delete_certificate_error: ~azure.batch.models.DeleteBatchCertificateError + :vartype delete_certificate_error: ~azure.batch.models.BatchCertificateDeleteError :ivar data: The base64-encoded contents of the Certificate. The maximum size is 10KB. Required. - :vartype data: str + :vartype data: bytes :ivar certificate_format: The format of the Certificate data. Known values are: "pfx" and "cer". :vartype certificate_format: str or ~azure.batch.models.BatchCertificateFormat @@ -649,12 +648,12 @@ class BatchCertificate(_model_base.Model): Certificate is in its initial Active state.""" public_data: Optional[str] = rest_field(name="publicData", visibility=["read"]) """The public part of the Certificate as a base-64 encoded .cer file.""" - delete_certificate_error: Optional["_models.DeleteBatchCertificateError"] = rest_field( + delete_certificate_error: Optional["_models.BatchCertificateDeleteError"] = rest_field( name="deleteCertificateError", visibility=["read"] ) """The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state.""" - data: str = rest_field(visibility=["create"]) + data: bytes = rest_field(visibility=["create"], format="base64") """The base64-encoded contents of the Certificate. The maximum size is 10KB. Required.""" certificate_format: Optional[Union[str, "_models.BatchCertificateFormat"]] = rest_field( name="certificateFormat", visibility=["create"] @@ -670,7 +669,7 @@ def __init__( *, thumbprint: str, thumbprint_algorithm: str, - data: str, + data: bytes, certificate_format: Optional[Union[str, "_models.BatchCertificateFormat"]] = None, password: Optional[str] = None, ) -> None: ... @@ -686,7 +685,57 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchCertificateReference(_model_base.Model): +class BatchCertificateDeleteError(_Model): + """An error encountered by the Batch service when deleting a Certificate. + + :ivar code: An identifier for the Certificate deletion error. Codes are invariant and are + intended to be consumed programmatically. + :vartype code: str + :ivar message: A message describing the Certificate deletion error, intended to be suitable for + display in a user interface. + :vartype message: str + :ivar values_property: A list of additional error details related to the Certificate deletion + error. This list includes details such as the active Pools and Compute Nodes referencing this + Certificate. However, if a large number of resources reference the Certificate, the list + contains only about the first hundred. + :vartype values_property: list[~azure.batch.models.NameValuePair] + """ + + code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An identifier for the Certificate deletion error. Codes are invariant and are intended to be + consumed programmatically.""" + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A message describing the Certificate deletion error, intended to be suitable for display in a + user interface.""" + values_property: Optional[List["_models.NameValuePair"]] = rest_field( + name="values", visibility=["read", "create", "update", "delete", "query"] + ) + """A list of additional error details related to the Certificate deletion error. This list + includes details such as the active Pools and Compute Nodes referencing this Certificate. 
+    However, if a large number of resources reference the Certificate, the list contains only about
+    the first hundred."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        code: Optional[str] = None,
+        message: Optional[str] = None,
+        values_property: Optional[List["_models.NameValuePair"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class BatchCertificateReference(_Model):
     """A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object
     is deprecated and will be removed after February, 2024. Please use the
     `Azure KeyVault Extension `_
@@ -774,7 +823,138 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
 
 
-class BatchError(_model_base.Model):
+class BatchContainerConfiguration(_Model):
+    """The configuration for container-enabled Pools.
+
+    :ivar type: The container technology to be used. Required. Known values are: "dockerCompatible"
+      and "criCompatible".
+    :vartype type: str or ~azure.batch.models.ContainerType
+    :ivar container_image_names: The collection of container Image names. This is the full Image
+      reference, as would be specified to "docker pull". An Image will be sourced from the default
+      Docker registry unless the Image is fully qualified with an alternative registry.
+    :vartype container_image_names: list[str]
+    :ivar container_registries: Additional private registries from which containers can be pulled.
+      If any Images must be downloaded from a private registry which requires credentials, then those
+      credentials must be provided here.
+    :vartype container_registries: list[~azure.batch.models.ContainerRegistryReference]
+    """
+
+    type: Union[str, "_models.ContainerType"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The container technology to be used. Required. Known values are: \"dockerCompatible\" and
+    \"criCompatible\"."""
+    container_image_names: Optional[List[str]] = rest_field(
+        name="containerImageNames", visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The collection of container Image names. This is the full Image reference, as would be
+    specified to \"docker pull\". An Image will be sourced from the default Docker registry unless
+    the Image is fully qualified with an alternative registry."""
+    container_registries: Optional[List["_models.ContainerRegistryReference"]] = rest_field(
+        name="containerRegistries", visibility=["read", "create", "update", "delete", "query"]
+    )
+    """Additional private registries from which containers can be pulled. If any Images must be
+    downloaded from a private registry which requires credentials, then those credentials must be
+    provided here."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: Union[str, "_models.ContainerType"],
+        container_image_names: Optional[List[str]] = None,
+        container_registries: Optional[List["_models.ContainerRegistryReference"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class BatchCreateTaskCollectionResult(_Model):
+    """The result of creating a collection of Tasks in a Job. 
+ + :ivar values_property: The results of the create Task collection operation. + :vartype values_property: list[~azure.batch.models.BatchTaskCreateResult] + """ + + values_property: Optional[List["_models.BatchTaskCreateResult"]] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) + """The results of the create Task collection operation.""" + + @overload + def __init__( + self, + *, + values_property: Optional[List["_models.BatchTaskCreateResult"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchDiffDiskSettings(_Model): + """Specifies the ephemeral Disk Settings for the operating system disk used by the + compute node (VM). + + :ivar placement: Specifies the ephemeral disk placement for operating system disk for all VMs + in the pool. This property can be used by user in the request to choose the location e.g., + cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk + size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at + `https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + `_ + and Linux VMs at + `https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements + `_. + "cachedisk" + :vartype placement: str or ~azure.batch.models.DiffDiskPlacement + """ + + placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This + property can be used by user in the request to choose the location e.g., cache disk space for + Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, + please refer to Ephemeral OS disk size requirements for Windows VMs at + `https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + `_ + and Linux VMs at + `https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements + `_. + \"cachedisk\"""" + + @overload + def __init__( + self, + *, + placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchError(_Model): """An error response received from the Azure Batch service. :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed @@ -820,7 +1000,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchErrorDetail(_model_base.Model): +class BatchErrorDetail(_Model): """An item of additional information included in an Azure Batch error response. :ivar key: An identifier specifying the meaning of the Value property. @@ -853,7 +1033,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchErrorMessage(_model_base.Model): +class BatchErrorMessage(_Model): """An error message received in an Azure Batch error response. 
 :ivar lang: The language code of the error message.
@@ -886,12 +1066,112 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
 
 
-class BatchJob(_model_base.Model):
-    """An Azure Batch Job.
+class BatchInboundNatPool(_Model):
+    """An inbound NAT Pool that can be used to address specific ports on Compute Nodes
+    in a Batch Pool externally.
 
-    :ivar id: A string that uniquely identifies the Job within the Account. The ID is
-      case-preserving and case-insensitive (that is, you may not have two IDs within an Account that
-      differ only by case).
+    :ivar name: The name of the endpoint. The name must be unique within a Batch Pool, can contain
+      letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number,
+      must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid
+      values are provided the request fails with HTTP status code 400. Required.
+    :vartype name: str
+    :ivar protocol: The protocol of the endpoint. Required. Known values are: "tcp" and "udp".
+    :vartype protocol: str or ~azure.batch.models.InboundEndpointProtocol
+    :ivar backend_port: The port number on the Compute Node. This must be unique within a Batch
+      Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are
+      reserved. If any reserved values are provided the request fails with HTTP status code 400.
+      Required.
+    :vartype backend_port: int
+    :ivar frontend_port_range_start: The first port number in the range of external ports that will
+      be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable
+      values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All
+      ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40
+      ports. If any reserved or overlapping values are provided the request fails with HTTP status
+      code 400. Required.
+    :vartype frontend_port_range_start: int
+    :ivar frontend_port_range_end: The last port number in the range of external ports that will be
+      used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable
+      values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the
+      Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must
+      contain at least 40 ports. If any reserved or overlapping values are provided the request fails
+      with HTTP status code 400. Required.
+    :vartype frontend_port_range_end: int
+    :ivar network_security_group_rules: A list of network security group rules that will be applied
+      to the endpoint. The maximum number of rules that can be specified across all the endpoints on
+      a Batch Pool is 25. If no network security group rules are specified, a default rule will be
+      created to allow inbound access to the specified backendPort. If the maximum number of network
+      security group rules is exceeded the request fails with HTTP status code 400.
+    :vartype network_security_group_rules: list[~azure.batch.models.NetworkSecurityGroupRule]
+    """
+
+    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The name of the endpoint. The name must be unique within a Batch Pool, can contain letters,
+    numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end
+    with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values
+    are provided the request fails with HTTP status code 400. 
Required.""" + protocol: Union[str, "_models.InboundEndpointProtocol"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" + backend_port: int = rest_field(name="backendPort", visibility=["read", "create", "update", "delete", "query"]) + """The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values + are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved + values are provided the request fails with HTTP status code 400. Required.""" + frontend_port_range_start: int = rest_field( + name="frontendPortRangeStart", visibility=["read", "create", "update", "delete", "query"] + ) + """The first port number in the range of external ports that will be used to provide inbound + access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and + 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be + distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or + overlapping values are provided the request fails with HTTP status code 400. Required.""" + frontend_port_range_end: int = rest_field( + name="frontendPortRangeEnd", visibility=["read", "create", "update", "delete", "query"] + ) + """The last port number in the range of external ports that will be used to provide inbound access + to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 + except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a + Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any + reserved or overlapping values are provided the request fails with HTTP status code 400. + Required.""" + network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = rest_field( + name="networkSecurityGroupRules", visibility=["read", "create", "update", "delete", "query"] + ) + """A list of network security group rules that will be applied to the endpoint. The maximum number + of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network + security group rules are specified, a default rule will be created to allow inbound access to + the specified backendPort. If the maximum number of network security group rules is exceeded + the request fails with HTTP status code 400.""" + + @overload + def __init__( + self, + *, + name: str, + protocol: Union[str, "_models.InboundEndpointProtocol"], + backend_port: int, + frontend_port_range_start: int, + frontend_port_range_end: int, + network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchJob(_Model): + """An Azure Batch Job. + + :ivar id: A string that uniquely identifies the Job within the Account. The ID is + case-preserving and case-insensitive (that is, you may not have two IDs within an Account that + differ only by case). :vartype id: str :ivar display_name: The display name for the Job. 
 :vartype display_name: str
@@ -954,28 +1234,28 @@ class BatchJob(_model_base.Model):
     :vartype common_environment_settings: list[~azure.batch.models.EnvironmentSetting]
     :ivar pool_info: The Pool settings associated with the Job. Required.
     :vartype pool_info: ~azure.batch.models.BatchPoolInfo
-    :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in the Job
-      are in the completed state. The default is noaction. Known values are: "noaction" and
+    :ivar all_tasks_complete_mode: The action the Batch service should take when all Tasks in the
+      Job are in the completed state. The default is noaction. Known values are: "noaction" and
       "terminatejob".
-    :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete
-    :ivar on_task_failure: The action the Batch service should take when any Task in the Job fails.
-      A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task
-      completes with a non-zero exit code after exhausting its retry count, or if there was an error
-      starting the Task, for example due to a resource file download error. The default is noaction.
-      Known values are: "noaction" and "performexitoptionsjobaction".
-    :vartype on_task_failure: str or ~azure.batch.models.OnBatchTaskFailure
+    :vartype all_tasks_complete_mode: str or ~azure.batch.models.BatchAllTasksCompleteMode
+    :ivar task_failure_mode: The action the Batch service should take when any Task in the Job
+      fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the
+      Task completes with a non-zero exit code after exhausting its retry count, or if there was an
+      error starting the Task, for example due to a resource file download error. The default is
+      noaction. Known values are: "noaction" and "performexitoptionsjobaction".
+    :vartype task_failure_mode: str or ~azure.batch.models.BatchTaskFailureMode
     :ivar network_configuration: The network configuration for the Job.
     :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration
     :ivar metadata: A list of name-value pairs associated with the Job as metadata. The Batch
       service does not assign any meaning to metadata; it is solely for the use of user code.
-    :vartype metadata: list[~azure.batch.models.MetadataItem]
+    :vartype metadata: list[~azure.batch.models.BatchMetadataItem]
     :ivar execution_info: The execution information for the Job.
     :vartype execution_info: ~azure.batch.models.BatchJobExecutionInfo
-    :ivar stats: Resource usage statistics for the entire lifetime of the Job. This property is
-      populated only if the BatchJob was retrieved with an expand clause including the 'stats'
-      attribute; otherwise it is null. The statistics may not be immediately available. The Batch
-      service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
-    :vartype stats: ~azure.batch.models.BatchJobStatistics
+    :ivar job_statistics: Resource usage statistics for the entire lifetime of the Job. This
+      property is populated only if the BatchJob was retrieved with an expand clause including the
+      'stats' attribute; otherwise it is null. The statistics may not be immediately available. The
+      Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. 
+ :vartype job_statistics: ~azure.batch.models.BatchJobStatistics """ id: Optional[str] = rest_field(visibility=["read"]) @@ -1059,12 +1339,12 @@ class BatchJob(_model_base.Model): name="poolInfo", visibility=["read", "create", "update", "delete", "query"] ) """The Pool settings associated with the Job. Required.""" - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = rest_field( name="onAllTasksComplete", visibility=["read", "create", "update", "delete", "query"] ) """The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. Known values are: \"noaction\" and \"terminatejob\".""" - on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = rest_field( + task_failure_mode: Optional[Union[str, "_models.BatchTaskFailureMode"]] = rest_field( name="onTaskFailure", visibility=["read"] ) """The action the Batch service should take when any Task in the Job fails. A Task is considered @@ -1076,14 +1356,14 @@ class BatchJob(_model_base.Model): name="networkConfiguration", visibility=["read"] ) """The network configuration for the Job.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.""" execution_info: Optional["_models.BatchJobExecutionInfo"] = rest_field(name="executionInfo", visibility=["read"]) """The execution information for the Job.""" - stats: Optional["_models.BatchJobStatistics"] = rest_field(visibility=["read"]) + job_statistics: Optional["_models.BatchJobStatistics"] = rest_field(name="stats", visibility=["read"]) """Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs @@ -1098,8 +1378,8 @@ def __init__( # pylint: disable=too-many-locals allow_task_preemption: Optional[bool] = None, max_parallel_tasks: Optional[int] = None, constraints: Optional["_models.BatchJobConstraints"] = None, - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, ) -> None: ... @overload @@ -1113,7 +1393,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobConstraints(_model_base.Model): +class BatchJobConstraints(_Model): """The execution constraints for a Job. :ivar max_wall_clock_time: The maximum elapsed time that the Job may run, measured from the @@ -1169,7 +1449,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobCreateContent(_model_base.Model): +class BatchJobCreateOptions(_Model): """Parameters for creating an Azure Batch Job. :ivar id: A string that uniquely identifies the Job within the Account. 
The ID can contain any @@ -1228,25 +1508,25 @@ class BatchJobCreateContent(_model_base.Model): :vartype common_environment_settings: list[~azure.batch.models.EnvironmentSetting] :ivar pool_info: The Pool on which the Batch service runs the Job's Tasks. Required. :vartype pool_info: ~azure.batch.models.BatchPoolInfo - :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in the Job - are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered - complete. This option is therefore most commonly used with a Job Manager task; if you want to - use automatic Job termination without a Job Manager, you should initially set + :ivar all_tasks_complete_mode: The action the Batch service should take when all Tasks in the + Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are + considered complete. This option is therefore most commonly used with a Job Manager task; if + you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. Known values are: "noaction" and "terminatejob". - :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete - :ivar on_task_failure: The action the Batch service should take when any Task in the Job fails. - A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task - completes with a non-zero exit code after exhausting its retry count, or if there was an error - starting the Task, for example due to a resource file download error. The default is noaction. - Known values are: "noaction" and "performexitoptionsjobaction". - :vartype on_task_failure: str or ~azure.batch.models.OnBatchTaskFailure + :vartype all_tasks_complete_mode: str or ~azure.batch.models.BatchAllTasksCompleteMode + :ivar task_failure_mode: The action the Batch service should take when any Task in the Job + fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the + Task completes with a non-zero exit code after exhausting its retry count, or if there was an + error starting the Task, for example due to a resource file download error. The default is + noaction. Known values are: "noaction" and "performexitoptionsjobaction". + :vartype task_failure_mode: str or ~azure.batch.models.BatchTaskFailureMode :ivar network_configuration: The network configuration for the Job. :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration :ivar metadata: A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] """ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -1321,7 +1601,7 @@ class BatchJobCreateContent(_model_base.Model): name="poolInfo", visibility=["read", "create", "update", "delete", "query"] ) """The Pool on which the Batch service runs the Job's Tasks.
Required.""" - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = rest_field( name="onAllTasksComplete", visibility=["read", "create", "update", "delete", "query"] ) """The action the Batch service should take when all Tasks in the Job are in the completed state. @@ -1330,7 +1610,7 @@ class BatchJobCreateContent(_model_base.Model): termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. Known values are: \"noaction\" and \"terminatejob\".""" - on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = rest_field( + task_failure_mode: Optional[Union[str, "_models.BatchTaskFailureMode"]] = rest_field( name="onTaskFailure", visibility=["read", "create", "update", "delete", "query"] ) """The action the Batch service should take when any Task in the Job fails. A Task is considered @@ -1342,7 +1622,7 @@ class BatchJobCreateContent(_model_base.Model): name="networkConfiguration", visibility=["read", "create", "update", "delete", "query"] ) """The network configuration for the Job.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Job as metadata. The Batch service does not @@ -1364,10 +1644,10 @@ def __init__( job_preparation_task: Optional["_models.BatchJobPreparationTask"] = None, job_release_task: Optional["_models.BatchJobReleaseTask"] = None, common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, - on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = None, + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = None, + task_failure_mode: Optional[Union[str, "_models.BatchTaskFailureMode"]] = None, network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, ) -> None: ... @overload @@ -1381,7 +1661,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobDisableContent(_model_base.Model): +class BatchJobDisableOptions(_Model): """Parameters for disabling an Azure Batch Job. :ivar disable_tasks: What to do with active Tasks associated with the Job. Required. Known @@ -1413,7 +1693,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobExecutionInfo(_model_base.Model): +class BatchJobExecutionInfo(_Model): """Contains information about the execution of a Job in the Azure Batch service. :ivar start_time: The start time of the Job. This is the time at which the Job was created. @@ -1500,7 +1780,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobManagerTask(_model_base.Model): +class BatchJobManagerTask(_Model): """Specifies details of a Job Manager Task. The Job Manager Task is automatically started when the Job is created. 
The Batch service tries to schedule the Job Manager Task before any other Tasks in @@ -1762,7 +2042,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobNetworkConfiguration(_model_base.Model): +class BatchJobNetworkConfiguration(_Model): """The network configuration for the Job. :ivar subnet_id: The ARM resource identifier of the virtual network subnet which Compute Nodes @@ -1786,11 +2066,11 @@ class BatchJobNetworkConfiguration(_model_base.Model): `_. Required. :vartype subnet_id: str - :ivar skip_withdraw_from_v_net: Whether to withdraw Compute Nodes from the virtual network to + :ivar skip_withdraw_from_vnet: Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. Required. - :vartype skip_withdraw_from_v_net: bool + :vartype skip_withdraw_from_vnet: bool """ subnet_id: str = rest_field(name="subnetId", visibility=["read", "create", "update", "delete", "query"]) @@ -1813,7 +2093,7 @@ class BatchJobNetworkConfiguration(_model_base.Model): `https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration `_. Required.""" - skip_withdraw_from_v_net: bool = rest_field( + skip_withdraw_from_vnet: bool = rest_field( name="skipWithdrawFromVNet", visibility=["read", "create", "update", "delete", "query"] ) """Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or @@ -1825,7 +2105,7 @@ def __init__( self, *, subnet_id: str, - skip_withdraw_from_v_net: bool, + skip_withdraw_from_vnet: bool, ) -> None: ... @overload @@ -1839,7 +2119,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationAndReleaseTaskStatus(_model_base.Model): +class BatchJobPreparationAndReleaseTaskStatus(_Model): """The status of the Job Preparation and Job Release Tasks on a Compute Node. :ivar pool_id: The ID of the Pool containing the Compute Node to which this entry refers. @@ -1896,7 +2176,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationTask(_model_base.Model): +class BatchJobPreparationTask(_Model): """A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. You can use Job Preparation to prepare a Node to run Tasks for the Job. Activities commonly performed in Job Preparation include: Downloading common @@ -2086,7 +2366,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationTaskExecutionInfo(_model_base.Model): +class BatchJobPreparationTaskExecutionInfo(_Model): """Contains information about the execution of a Job Preparation Task on a Compute Node. @@ -2228,7 +2508,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobReleaseTask(_model_base.Model): +class BatchJobReleaseTask(_Model): """A Job Release Task to run on Job completion on any Compute Node where the Job has run. 
The Job Release Task runs when the Job ends, because of one of the following: The user calls the Terminate Job API, or the Delete Job API while the Job is @@ -2383,7 +2663,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobReleaseTaskExecutionInfo(_model_base.Model): +class BatchJobReleaseTaskExecutionInfo(_Model): """Contains information about the execution of a Job Release Task on a Compute Node. @@ -2494,7 +2774,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSchedule(_model_base.Model): +class BatchJobSchedule(_Model): """A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a specification used to create each Job. @@ -2537,11 +2817,11 @@ class BatchJobSchedule(_model_base.Model): :vartype execution_info: ~azure.batch.models.BatchJobScheduleExecutionInfo :ivar metadata: A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - :vartype metadata: list[~azure.batch.models.MetadataItem] - :ivar stats: The lifetime resource usage statistics for the Job Schedule. The statistics may - not be immediately available. The Batch service performs periodic roll-up of statistics. The - typical delay is about 30 minutes. - :vartype stats: ~azure.batch.models.BatchJobScheduleStatistics + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] + :ivar job_schedule_statistics: The lifetime resource usage statistics for the Job Schedule. The + statistics may not be immediately available. The Batch service performs periodic roll-up of + statistics. The typical delay is about 30 minutes. + :vartype job_schedule_statistics: ~azure.batch.models.BatchJobScheduleStatistics """ id: Optional[str] = rest_field(visibility=["read"]) @@ -2592,12 +2872,14 @@ class BatchJobSchedule(_model_base.Model): name="executionInfo", visibility=["read"] ) """Information about Jobs that have been and will be run under this schedule.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.""" - stats: Optional["_models.BatchJobScheduleStatistics"] = rest_field(visibility=["read"]) + job_schedule_statistics: Optional["_models.BatchJobScheduleStatistics"] = rest_field( + name="stats", visibility=["read"] + ) """The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.""" @@ -2608,7 +2890,7 @@ def __init__( *, job_specification: "_models.BatchJobSpecification", schedule: Optional["_models.BatchJobScheduleConfiguration"] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, ) -> None: ... @overload @@ -2622,7 +2904,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleConfiguration(_model_base.Model): +class BatchJobScheduleConfiguration(_Model): """The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. 
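# --- Editor's note: illustrative usage sketch, not part of the generated diff ---
# The hunks above rename MetadataItem to BatchMetadataItem,
# BatchJobScheduleCreateContent to BatchJobScheduleCreateOptions, and expose the
# read-only `stats` property of BatchJobSchedule as `job_schedule_statistics`.
# A minimal sketch of the renamed surface, assuming the azure-batch build
# produced by this PR; `recurrence_interval`, `pool_id`, the `client` object,
# and the literal IDs are illustrative assumptions, not values from this patch.
import datetime
from azure.batch import models

create_options = models.BatchJobScheduleCreateOptions(
    id="weekly-reports",                                   # placeholder schedule ID
    schedule=models.BatchJobScheduleConfiguration(
        recurrence_interval=datetime.timedelta(days=7),    # one Job per week
    ),
    job_specification=models.BatchJobSpecification(
        pool_info=models.BatchPoolInfo(pool_id="mypool"),  # placeholder Pool ID
    ),
    metadata=[models.BatchMetadataItem(name="owner", value="reporting-team")],
)
# After retrieval (with the 'stats' expand clause), lifetime statistics surface
# on the renamed read-only property, e.g.:
#     schedule = client.get_job_schedule("weekly-reports")
#     if schedule.job_schedule_statistics is not None:
#         print(schedule.job_schedule_statistics.succeeded_tasks_count)
# -------------------------------------------------------------------------------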
@@ -2726,7 +3008,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleCreateContent(_model_base.Model): +class BatchJobScheduleCreateOptions(_Model): """Parameters for creating an Azure Batch Job Schedule. :ivar id: A string that uniquely identifies the schedule within the Account. The ID can contain @@ -2744,7 +3026,7 @@ class BatchJobScheduleCreateContent(_model_base.Model): :vartype job_specification: ~azure.batch.models.BatchJobSpecification :ivar metadata: A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] """ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -2766,7 +3048,7 @@ class BatchJobScheduleCreateContent(_model_base.Model): name="jobSpecification", visibility=["read", "create", "update", "delete", "query"] ) """The details of the Jobs to be created on this schedule. Required.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the schedule as metadata. The Batch service does not @@ -2780,7 +3062,7 @@ def __init__( schedule: "_models.BatchJobScheduleConfiguration", job_specification: "_models.BatchJobSpecification", display_name: Optional[str] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, ) -> None: ... @overload @@ -2794,7 +3076,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleExecutionInfo(_model_base.Model): +class BatchJobScheduleExecutionInfo(_Model): """Contains information about Jobs that have been and will be run under a Job Schedule. @@ -2849,7 +3131,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleStatistics(_model_base.Model): +class BatchJobScheduleStatistics(_Model): """Resource usage statistics for a Job Schedule. :ivar url: The URL of the statistics. Required. @@ -2871,29 +3153,29 @@ class BatchJobScheduleStatistics(_model_base.Model): had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. Required. :vartype wall_clock_time: ~datetime.timedelta - :ivar read_i_ops: The total number of disk read operations made by all Tasks in all Jobs + :ivar read_iops: The total number of disk read operations made by all Tasks in all Jobs created + under the schedule. Required. + :vartype read_iops: int + :ivar write_iops: The total number of disk write operations made by all Tasks in all Jobs created under the schedule. Required. - :vartype read_i_ops: int - :ivar write_i_ops: The total number of disk write operations made by all Tasks in all Jobs - created under the schedule. Required. - :vartype write_i_ops: int + :vartype write_iops: int :ivar read_io_gi_b: The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. Required. :vartype read_io_gi_b: float :ivar write_io_gi_b: The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. Required. 
:vartype write_io_gi_b: float - :ivar num_succeeded_tasks: The total number of Tasks successfully completed during the given + :ivar succeeded_tasks_count: The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. Required. - :vartype num_succeeded_tasks: int - :ivar num_failed_tasks: The total number of Tasks that failed during the given time range in + :vartype succeeded_tasks_count: int + :ivar failed_tasks_count: The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. Required. - :vartype num_failed_tasks: int - :ivar num_task_retries: The total number of retries during the given time range on all Tasks in - all Jobs created under the schedule. Required. - :vartype num_task_retries: int + :vartype failed_tasks_count: int + :ivar task_retries_count: The total number of retries during the given time range on all Tasks + in all Jobs created under the schedule. Required. + :vartype task_retries_count: int :ivar wait_time: The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to @@ -2931,12 +3213,12 @@ class BatchJobScheduleStatistics(_model_base.Model): finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. Required.""" - read_i_ops: int = rest_field( + read_iops: int = rest_field( name="readIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk read operations made by all Tasks in all Jobs created under the schedule. Required.""" - write_i_ops: int = rest_field( + write_iops: int = rest_field( name="writeIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations made by all Tasks in all Jobs created under the @@ -2947,18 +3229,18 @@ class BatchJobScheduleStatistics(_model_base.Model): write_io_gi_b: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. Required.""" - num_succeeded_tasks: int = rest_field( + succeeded_tasks_count: int = rest_field( name="numSucceededTasks", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. Required.""" - num_failed_tasks: int = rest_field( + failed_tasks_count: int = rest_field( name="numFailedTasks", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. 
Required.""" - num_task_retries: int = rest_field( + task_retries_count: int = rest_field( name="numTaskRetries", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of retries during the given time range on all Tasks in all Jobs created under @@ -2982,13 +3264,13 @@ def __init__( user_cpu_time: datetime.timedelta, kernel_cpu_time: datetime.timedelta, wall_clock_time: datetime.timedelta, - read_i_ops: int, - write_i_ops: int, + read_iops: int, + write_iops: int, read_io_gi_b: float, write_io_gi_b: float, - num_succeeded_tasks: int, - num_failed_tasks: int, - num_task_retries: int, + succeeded_tasks_count: int, + failed_tasks_count: int, + task_retries_count: int, wait_time: datetime.timedelta, ) -> None: ... @@ -3003,7 +3285,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleUpdateContent(_model_base.Model): +class BatchJobScheduleUpdateOptions(_Model): """Parameters for updating an Azure Batch Job Schedule. :ivar schedule: The schedule according to which Jobs will be created. All times are fixed @@ -3016,7 +3298,7 @@ class BatchJobScheduleUpdateContent(_model_base.Model): :vartype job_specification: ~azure.batch.models.BatchJobSpecification :ivar metadata: A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] """ schedule: Optional["_models.BatchJobScheduleConfiguration"] = rest_field( @@ -3031,7 +3313,7 @@ class BatchJobScheduleUpdateContent(_model_base.Model): """The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify @@ -3043,7 +3325,7 @@ def __init__( *, schedule: Optional["_models.BatchJobScheduleConfiguration"] = None, job_specification: Optional["_models.BatchJobSpecification"] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, ) -> None: ... @overload @@ -3057,12 +3339,12 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSchedulingError(_model_base.Model): +class BatchJobSchedulingError(_Model): """An error encountered by the Batch service when scheduling a Job. :ivar category: The category of the Job scheduling error. Required. Known values are: "usererror" and "servererror". - :vartype category: str or ~azure.batch.models.ErrorCategory + :vartype category: str or ~azure.batch.models.BatchErrorSourceCategory :ivar code: An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. 
:vartype code: str @@ -3073,7 +3355,7 @@ class BatchJobSchedulingError(_model_base.Model): :vartype details: list[~azure.batch.models.NameValuePair] """ - category: Union[str, "_models.ErrorCategory"] = rest_field( + category: Union[str, "_models.BatchErrorSourceCategory"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The category of the Job scheduling error. Required. Known values are: \"usererror\" and @@ -3093,7 +3375,7 @@ class BatchJobSchedulingError(_model_base.Model): def __init__( self, *, - category: Union[str, "_models.ErrorCategory"], + category: Union[str, "_models.BatchErrorSourceCategory"], code: Optional[str] = None, message: Optional[str] = None, details: Optional[List["_models.NameValuePair"]] = None, @@ -3110,7 +3392,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSpecification(_model_base.Model): +class BatchJobSpecification(_Model): """Specifies details of the Jobs to be created on a schedule. :ivar priority: The priority of Jobs created under this schedule. Priority values can range @@ -3136,21 +3418,21 @@ class BatchJobSpecification(_model_base.Model): :ivar uses_task_dependencies: Whether Tasks in the Job can define dependencies on each other. The default is false. :vartype uses_task_dependencies: bool - :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in a Job + :ivar all_tasks_complete_mode: The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. Known values are: "noaction" and "terminatejob". - :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete - :ivar on_task_failure: The action the Batch service should take when any Task fails in a Job + :vartype all_tasks_complete_mode: str or ~azure.batch.models.BatchAllTasksCompleteMode + :ivar task_failure_mode: The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. Known values are: "noaction" and "performexitoptionsjobaction". - :vartype on_task_failure: str or ~azure.batch.models.OnBatchTaskFailure + :vartype task_failure_mode: str or ~azure.batch.models.BatchTaskFailureMode :ivar network_configuration: The network configuration for the Job. :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration :ivar constraints: The execution constraints for Jobs created under this schedule. @@ -3183,7 +3465,7 @@ class BatchJobSpecification(_model_base.Model): :ivar metadata: A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
- :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] """ priority: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -3214,7 +3496,7 @@ class BatchJobSpecification(_model_base.Model): name="usesTaskDependencies", visibility=["read", "create", "update", "delete", "query"] ) """Whether Tasks in the Job can define dependencies on each other. The default is false.""" - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = rest_field( name="onAllTasksComplete", visibility=["read", "create", "update", "delete", "query"] ) """The action the Batch service should take when all Tasks in a Job created under this schedule @@ -3224,7 +3506,7 @@ class BatchJobSpecification(_model_base.Model): onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. Known values are: \"noaction\" and \"terminatejob\".""" - on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = rest_field( + task_failure_mode: Optional[Union[str, "_models.BatchTaskFailureMode"]] = rest_field( name="onTaskFailure", visibility=["read", "create", "update", "delete", "query"] ) """The action the Batch service should take when any Task fails in a Job created under this @@ -3276,7 +3558,7 @@ class BatchJobSpecification(_model_base.Model): ) """The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. Required.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with each Job created under this schedule as metadata. @@ -3293,15 +3575,15 @@ def __init__( max_parallel_tasks: Optional[int] = None, display_name: Optional[str] = None, uses_task_dependencies: Optional[bool] = None, - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, - on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = None, + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = None, + task_failure_mode: Optional[Union[str, "_models.BatchTaskFailureMode"]] = None, network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, constraints: Optional["_models.BatchJobConstraints"] = None, job_manager_task: Optional["_models.BatchJobManagerTask"] = None, job_preparation_task: Optional["_models.BatchJobPreparationTask"] = None, job_release_task: Optional["_models.BatchJobReleaseTask"] = None, common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, ) -> None: ... @overload @@ -3315,7 +3597,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobStatistics(_model_base.Model): +class BatchJobStatistics(_Model): """Resource usage statistics for a Job. :ivar url: The URL of the statistics. Required. @@ -3336,28 +3618,28 @@ class BatchJobStatistics(_model_base.Model): (or to the last time the statistics were updated, if the Task had not finished by then). 
If a Task was retried, this includes the wall clock time of all the Task retries. Required. :vartype wall_clock_time: ~datetime.timedelta - :ivar read_i_ops: The total number of disk read operations made by all Tasks in the Job. + :ivar read_iops: The total number of disk read operations made by all Tasks in the Job. Required. - :vartype read_i_ops: int - :ivar write_i_ops: The total number of disk write operations made by all Tasks in the Job. + :vartype read_iops: int + :ivar write_iops: The total number of disk write operations made by all Tasks in the Job. Required. - :vartype write_i_ops: int + :vartype write_iops: int :ivar read_io_gi_b: The total amount of data in GiB read from disk by all Tasks in the Job. Required. :vartype read_io_gi_b: float :ivar write_io_gi_b: The total amount of data in GiB written to disk by all Tasks in the Job. Required. :vartype write_io_gi_b: float - :ivar num_succeeded_tasks: The total number of Tasks successfully completed in the Job during + :ivar succeeded_tasks_count: The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. Required. - :vartype num_succeeded_tasks: int - :ivar num_failed_tasks: The total number of Tasks in the Job that failed during the given time - range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + :vartype succeeded_tasks_count: int + :ivar failed_tasks_count: The total number of Tasks in the Job that failed during the given + time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. Required. - :vartype num_failed_tasks: int - :ivar num_task_retries: The total number of retries on all the Tasks in the Job during the + :vartype failed_tasks_count: int + :ivar task_retries_count: The total number of retries on all the Tasks in the Job during the given time range. Required. - :vartype num_task_retries: int + :vartype task_retries_count: int :ivar wait_time: The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task @@ -3394,11 +3676,11 @@ class BatchJobStatistics(_model_base.Model): from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. Required.""" - read_i_ops: int = rest_field( + read_iops: int = rest_field( name="readIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk read operations made by all Tasks in the Job. Required.""" - write_i_ops: int = rest_field( + write_iops: int = rest_field( name="writeIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations made by all Tasks in the Job. Required.""" @@ -3406,17 +3688,17 @@ class BatchJobStatistics(_model_base.Model): """The total amount of data in GiB read from disk by all Tasks in the Job. Required.""" write_io_gi_b: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total amount of data in GiB written to disk by all Tasks in the Job. 
Required.""" - num_succeeded_tasks: int = rest_field( + succeeded_tasks_count: int = rest_field( name="numSucceededTasks", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. Required.""" - num_failed_tasks: int = rest_field( + failed_tasks_count: int = rest_field( name="numFailedTasks", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. Required.""" - num_task_retries: int = rest_field( + task_retries_count: int = rest_field( name="numTaskRetries", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of retries on all the Tasks in the Job during the given time range. Required.""" @@ -3439,13 +3721,13 @@ def __init__( user_cpu_time: datetime.timedelta, kernel_cpu_time: datetime.timedelta, wall_clock_time: datetime.timedelta, - read_i_ops: int, - write_i_ops: int, + read_iops: int, + write_iops: int, read_io_gi_b: float, write_io_gi_b: float, - num_succeeded_tasks: int, - num_failed_tasks: int, - num_task_retries: int, + succeeded_tasks_count: int, + failed_tasks_count: int, + task_retries_count: int, wait_time: datetime.timedelta, ) -> None: ... @@ -3460,7 +3742,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobTerminateContent(_model_base.Model): +class BatchJobTerminateOptions(_Model): """Parameters for terminating an Azure Batch Job. :ivar termination_reason: The text you want to appear as the Job's TerminationReason. The @@ -3491,7 +3773,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobUpdateContent(_model_base.Model): +class BatchJobUpdateOptions(_Model): """Parameters for updating an Azure Batch Job. :ivar priority: The priority of the Job. Priority values can range from -1000 to 1000, with @@ -3519,16 +3801,16 @@ class BatchJobUpdateContent(_model_base.Model): only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. :vartype pool_info: ~azure.batch.models.BatchPoolInfo - :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in the Job - are in the completed state. If omitted, the completion behavior is left unchanged. You may not - change the value from terminatejob to noaction - that is, once you have engaged automatic Job - termination, you cannot turn it off again. If you try to do this, the request fails with an + :ivar all_tasks_complete_mode: The action the Batch service should take when all Tasks in the + Job are in the completed state. If omitted, the completion behavior is left unchanged. You may + not change the value from terminatejob to noaction - that is, once you have engaged automatic + Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Known values are: "noaction" and "terminatejob". 
- :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete + :vartype all_tasks_complete_mode: str or ~azure.batch.models.BatchAllTasksCompleteMode :ivar metadata: A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] :ivar network_configuration: The network configuration for the Job. :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration """ @@ -3565,7 +3847,7 @@ class BatchJobUpdateContent(_model_base.Model): keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool.""" - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = rest_field( name="onAllTasksComplete", visibility=["read", "create", "update", "delete", "query"] ) """The action the Batch service should take when all Tasks in the Job are in the completed state. @@ -3574,7 +3856,7 @@ class BatchJobUpdateContent(_model_base.Model): turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Known values are: \"noaction\" and \"terminatejob\".""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job @@ -3593,8 +3875,8 @@ def __init__( max_parallel_tasks: Optional[int] = None, constraints: Optional["_models.BatchJobConstraints"] = None, pool_info: Optional["_models.BatchPoolInfo"] = None, - on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + all_tasks_complete_mode: Optional[Union[str, "_models.BatchAllTasksCompleteMode"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, ) -> None: ... @@ -3609,7 +3891,41 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNode(_model_base.Model): +class BatchMetadataItem(_Model): + """The Batch service does not assign any meaning to this metadata; it is solely + for the use of user code. + + :ivar name: The name of the metadata item. Required. + :vartype name: str + :ivar value: The value of the metadata item. Required. + :vartype value: str + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the metadata item. Required.""" + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The value of the metadata item. Required.""" + + @overload + def __init__( + self, + *, + name: str, + value: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchNode(_Model): """A Compute Node in the Batch service. :ivar id: The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a @@ -3863,7 +4179,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeAgentInfo(_model_base.Model): +class BatchNodeAgentInfo(_Model): """The Batch Compute Node agent is a program that runs on each Compute Node in the Pool and provides Batch capability on the Compute Node. @@ -3908,7 +4224,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeCounts(_model_base.Model): +class BatchNodeCounts(_Model): """The number of Compute Nodes in each Compute Node state. :ivar creating: The number of Compute Nodes in the creating state. Required. @@ -4021,7 +4337,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeDeallocateContent(_model_base.Model): +class BatchNodeDeallocateOptions(_Model): """Options for deallocating a Compute Node. :ivar node_deallocate_option: When to deallocate the Compute Node and what to do with currently @@ -4055,7 +4371,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeDisableSchedulingContent(_model_base.Model): +class BatchNodeDisableSchedulingOptions(_Model): """Parameters for disabling scheduling on an Azure Batch Compute Node. :ivar node_disable_scheduling_option: What to do with currently running Tasks when disabling @@ -4089,7 +4405,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeEndpointConfiguration(_model_base.Model): +class BatchNodeEndpointConfiguration(_Model): """The endpoint configuration for the Compute Node. :ivar inbound_endpoints: The list of inbound endpoints that are accessible on the Compute Node. @@ -4120,7 +4436,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeError(_model_base.Model): +class BatchNodeError(_Model): """An error encountered by a Compute Node. :ivar code: An identifier for the Compute Node error. Codes are invariant and are intended to @@ -4164,7 +4480,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeFile(_model_base.Model): +class BatchNodeFile(_Model): """Information about a file or directory on a Compute Node. :ivar name: The file path. @@ -4211,7 +4527,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeIdentityReference(_model_base.Model): +class BatchNodeIdentityReference(_Model): """The reference to a user assigned identity associated with the Batch pool which a compute node will use. @@ -4242,7 +4558,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeInfo(_model_base.Model): +class BatchNodeInfo(_Model): """Information about the Compute Node on which a Task ran. 
:ivar affinity_id: An identifier for the Node on which the Task ran, which can be passed when @@ -4303,7 +4619,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodePlacementConfiguration(_model_base.Model): +class BatchNodePlacementConfiguration(_Model): """For regional placement, nodes in the pool will be allocated in the same region. For zonal placement, nodes in the pool will be spread across different zones with best effort balancing. @@ -4339,16 +4655,16 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeRebootContent(_model_base.Model): +class BatchNodeRebootOptions(_Model): """Parameters for rebooting an Azure Batch Compute Node. - :ivar node_reboot_option: When to reboot the Compute Node and what to do with currently running + :ivar node_reboot_kind: When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. Known values are: "requeue", "terminate", "taskcompletion", and "retaineddata". - :vartype node_reboot_option: str or ~azure.batch.models.BatchNodeRebootOption + :vartype node_reboot_kind: str or ~azure.batch.models.BatchNodeRebootKind """ - node_reboot_option: Optional[Union[str, "_models.BatchNodeRebootOption"]] = rest_field( + node_reboot_kind: Optional[Union[str, "_models.BatchNodeRebootKind"]] = rest_field( name="nodeRebootOption", visibility=["read", "create", "update", "delete", "query"] ) """When to reboot the Compute Node and what to do with currently running Tasks. The default value @@ -4359,7 +4675,7 @@ class BatchNodeRebootContent(_model_base.Model): def __init__( self, *, - node_reboot_option: Optional[Union[str, "_models.BatchNodeRebootOption"]] = None, + node_reboot_kind: Optional[Union[str, "_models.BatchNodeRebootKind"]] = None, ) -> None: ... @overload @@ -4373,7 +4689,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeReimageContent(_model_base.Model): +class BatchNodeReimageOptions(_Model): """Parameters for reimaging an Azure Batch Compute Node. :ivar node_reimage_option: When to reimage the Compute Node and what to do with currently @@ -4407,7 +4723,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeRemoteLoginSettings(_model_base.Model): +class BatchNodeRemoteLoginSettings(_Model): """The remote login settings for a Compute Node. :ivar remote_login_ip_address: The IP address used for remote login to the Compute Node. @@ -4445,12 +4761,12 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeRemoveContent(_model_base.Model): +class BatchNodeRemoveOptions(_Model): """Parameters for removing nodes from an Azure Batch Pool. - :ivar node_list: A list containing the IDs of the Compute Nodes to be removed from the - specified Pool. A maximum of 100 nodes may be removed per request. Required. - :vartype node_list: list[str] + :ivar node_ids: A list containing the IDs of the Compute Nodes to be removed from the specified + Pool. A maximum of 100 nodes may be removed per request. Required. + :vartype node_ids: list[str] :ivar resize_timeout: The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. 
If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code @@ -4462,7 +4778,7 @@ class BatchNodeRemoveContent(_model_base.Model): :vartype node_deallocation_option: str or ~azure.batch.models.BatchNodeDeallocationOption """ - node_list: List[str] = rest_field(name="nodeList", visibility=["read", "create", "update", "delete", "query"]) + node_ids: List[str] = rest_field(name="nodeList", visibility=["read", "create", "update", "delete", "query"]) """A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. Required.""" resize_timeout: Optional[datetime.timedelta] = rest_field( @@ -4483,7 +4799,7 @@ class BatchNodeRemoveContent(_model_base.Model): def __init__( self, *, - node_list: List[str], + node_ids: List[str], resize_timeout: Optional[datetime.timedelta] = None, node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = None, ) -> None: ... @@ -4499,7 +4815,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeUserCreateContent(_model_base.Model): +class BatchNodeUserCreateOptions(_Model): """Parameters for creating a user account for RDP or SSH access on an Azure Batch Compute Node. :ivar name: The user name of the Account. Required. @@ -4565,7 +4881,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeUserUpdateContent(_model_base.Model): +class BatchNodeUserUpdateOptions(_Model): """Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. :ivar password: The password of the Account. The password is required for Windows Compute @@ -4622,7 +4938,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeVMExtension(_model_base.Model): +class BatchNodeVMExtension(_Model): """The configuration for virtual machine extension instance view. :ivar provisioning_state: The provisioning state of the virtual machine extension. @@ -4666,7 +4982,72 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPool(_model_base.Model): +class BatchOsDisk(_Model): + """Settings for the operating system disk of the compute node (VM). + + :ivar ephemeral_os_disk_settings: Specifies the ephemeral Disk Settings for the operating + system disk used by the compute node (VM). + :vartype ephemeral_os_disk_settings: ~azure.batch.models.BatchDiffDiskSettings + :ivar caching: Specifies the caching requirements. Possible values are: None, ReadOnly, + ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + Known values are: "none", "readonly", and "readwrite". + :vartype caching: str or ~azure.batch.models.CachingType + :ivar disk_size_gb: The initial disk size in GB when creating new OS disk. + :vartype disk_size_gb: int + :ivar managed_disk: The managed disk parameters. + :vartype managed_disk: ~azure.batch.models.ManagedDisk + :ivar write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or + disabled on the disk. 
+ :vartype write_accelerator_enabled: bool + """ + + ephemeral_os_disk_settings: Optional["_models.BatchDiffDiskSettings"] = rest_field( + name="ephemeralOSDiskSettings", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the ephemeral Disk Settings for the operating system disk used by the compute node + (VM).""" + caching: Optional[Union[str, "_models.CachingType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default + values are: None for Standard storage. ReadOnly for Premium storage. Known values are: + \"none\", \"readonly\", and \"readwrite\".""" + disk_size_gb: Optional[int] = rest_field( + name="diskSizeGB", visibility=["read", "create", "update", "delete", "query"] + ) + """The initial disk size in GB when creating new OS disk.""" + managed_disk: Optional["_models.ManagedDisk"] = rest_field( + name="managedDisk", visibility=["read", "create", "update", "delete", "query"] + ) + """The managed disk parameters.""" + write_accelerator_enabled: Optional[bool] = rest_field( + name="writeAcceleratorEnabled", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies whether writeAccelerator should be enabled or disabled on the disk.""" + + @overload + def __init__( + self, + *, + ephemeral_os_disk_settings: Optional["_models.BatchDiffDiskSettings"] = None, + caching: Optional[Union[str, "_models.CachingType"]] = None, + disk_size_gb: Optional[int] = None, + managed_disk: Optional["_models.ManagedDisk"] = None, + write_accelerator_enabled: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchPool(_Model): """A Pool in the Azure Batch service. :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any @@ -4784,13 +5165,13 @@ class BatchPool(_model_base.Model): :ivar user_accounts: The list of user Accounts to be created on each Compute Node in the Pool. :vartype user_accounts: list[~azure.batch.models.UserAccount] :ivar metadata: A list of name-value pairs associated with the Pool as metadata. - :vartype metadata: list[~azure.batch.models.MetadataItem] - :ivar stats: Utilization and resource usage statistics for the entire lifetime of the Pool. - This property is populated only if the BatchPool was retrieved with an expand clause including - the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. - The Batch service performs periodic roll-up of statistics. The typical delay is about 30 - minutes. - :vartype stats: ~azure.batch.models.BatchPoolStatistics + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] + :ivar pool_statistics: Utilization and resource usage statistics for the entire lifetime of the + Pool. This property is populated only if the BatchPool was retrieved with an expand clause + including the 'stats' attribute; otherwise it is null. The statistics may not be immediately + available. The Batch service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + :vartype pool_statistics: ~azure.batch.models.BatchPoolStatistics :ivar mount_configuration: A list of file systems to mount on each node in the pool. 
This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. :vartype mount_configuration: list[~azure.batch.models.MountConfiguration] @@ -4941,9 +5322,9 @@ class BatchPool(_model_base.Model): spread.""" user_accounts: Optional[List["_models.UserAccount"]] = rest_field(name="userAccounts", visibility=["read"]) """The list of user Accounts to be created on each Compute Node in the Pool.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field(visibility=["read"]) + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field(visibility=["read"]) """A list of name-value pairs associated with the Pool as metadata.""" - stats: Optional["_models.BatchPoolStatistics"] = rest_field(visibility=["read"]) + pool_statistics: Optional["_models.BatchPoolStatistics"] = rest_field(name="stats", visibility=["read"]) """Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch @@ -4992,62 +5373,164 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolCreateContent(_model_base.Model): - """Parameters for creating an Azure Batch Pool. +class BatchPoolAutoScaleEnableOptions(_Model): + """Parameters for enabling automatic scaling on an Azure Batch Pool. - :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any - combination of alphanumeric characters including hyphens and underscores, and cannot contain - more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not - have two Pool IDs within an Account that differ only by case). Required. - :vartype id: str - :ivar display_name: The display name for the Pool. The display name need not be unique and can - contain any Unicode characters up to a maximum length of 1024. - :vartype display_name: str - :ivar vm_size: The size of virtual machines in the Pool. All virtual machines in a Pool are the - same size. For information about available VM sizes for Pools using Images from the Virtual - Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual - Machines in Azure (`https://learn.microsoft.com/azure/virtual-machines/sizes/overview - `_). Batch supports all - Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and - STANDARD_DSV2 series). Required. - :vartype vm_size: str - :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This - property must be specified. - :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration - :ivar resize_timeout: The timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale is set to true. The - default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 - minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - :vartype resize_timeout: ~datetime.timedelta - :ivar resource_tags: The user-specified tags associated with the pool. The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags are propagated to the - backing Azure resources associated with the pool. 
This property can only be specified when the
-     Batch account was created with the poolAllocationMode property set to 'UserSubscription'.
-    :vartype resource_tags: dict[str, str]
-    :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. This
-     property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to
-     false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
-    :vartype target_dedicated_nodes: int
-    :ivar target_low_priority_nodes: The desired number of Spot/Low-priority Compute Nodes in the
-     Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale
-     is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or
-     both.
-    :vartype target_low_priority_nodes: int
-    :ivar enable_auto_scale: Whether the Pool size should automatically adjust over time. If false,
-     at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the
-     autoScaleFormula property is required and the Pool automatically resizes according to the
-     formula. The default value is false.
-    :vartype enable_auto_scale: bool
-    :ivar auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool. This
-     property must not be specified if enableAutoScale is set to false. It is required if
-     enableAutoScale is set to true. The formula is checked for validity before the Pool is created.
-     If the formula is not valid, the Batch service rejects the request with detailed error
-     information. For more information about specifying this formula, see 'Automatically scale
-     Compute Nodes in an Azure Batch Pool'
-     (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling
-     <https://learn.microsoft.com/azure/batch/batch-automatic-scaling>`_).
-    :vartype auto_scale_formula: str
+    :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The
+     formula is checked for validity before it is applied to the Pool. If the formula is not valid,
+     the Batch service rejects the request with detailed error information. For more information
+     about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool'
+     (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling
+     <https://learn.microsoft.com/azure/batch/batch-automatic-scaling>`_).
+    :vartype auto_scale_formula: str
+    :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the
+     Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and
+     maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5
+     minutes or greater than 168 hours, the Batch service rejects the request with an invalid
+     property value error; if you are calling the REST API directly, the HTTP status code is 400
+     (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule
+     will be stopped and a new autoscale evaluation schedule will be started, with its starting time
+     being the time when this request was issued.
+    :vartype auto_scale_evaluation_interval: ~datetime.timedelta
+    """
+
+    auto_scale_formula: Optional[str] = rest_field(
+        name="autoScaleFormula", visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The formula for the desired number of Compute Nodes in the Pool. The formula is checked for
+     validity before it is applied to the Pool. If the formula is not valid, the Batch service
+     rejects the request with detailed error information. For more information about specifying
+     this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool'
+     (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling
+     <https://learn.microsoft.com/azure/batch/batch-automatic-scaling>`_)."""
+    auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field(
+        name="autoScaleEvaluationInterval", visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The time interval at which to automatically adjust the Pool size according to the autoscale
+     formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168
+     hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the
+     Batch service rejects the request with an invalid property value error; if you are calling the
+     REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval,
+     then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation
+     schedule will be started, with its starting time being the time when this request was issued."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        auto_scale_formula: Optional[str] = None,
+        auto_scale_evaluation_interval: Optional[datetime.timedelta] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class BatchPoolAutoScaleEvaluateOptions(_Model):
+    """Parameters for evaluating an automatic scaling formula on an Azure Batch Pool.
+
+    :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The
+     formula is validated and its results calculated, but it is not applied to the Pool. To apply
+     the formula to the Pool, use the 'Enable automatic scaling on a Pool' operation. For more
+     information about specifying this formula, see Automatically scale Compute Nodes in an Azure
+     Batch Pool (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling
+     <https://learn.microsoft.com/azure/batch/batch-automatic-scaling>`_). Required.
+    :vartype auto_scale_formula: str
+    """
+
+    auto_scale_formula: str = rest_field(
+        name="autoScaleFormula", visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The formula for the desired number of Compute Nodes in the Pool. The formula is validated and
+     its results calculated, but it is not applied to the Pool. To apply the formula to the Pool,
+     use the 'Enable automatic scaling on a Pool' operation. For more information about specifying
+     this formula, see Automatically scale Compute Nodes in an Azure Batch Pool
+     (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling
+     <https://learn.microsoft.com/azure/batch/batch-automatic-scaling>`_). Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        auto_scale_formula: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class BatchPoolCreateOptions(_Model):
+    """Parameters for creating an Azure Batch Pool.
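(Editor's sketch, Python: how the two autoscale option models above would typically be used. Only the models appear in this patch; the operation names `enable_pool_auto_scale` and `evaluate_pool_auto_scale` are assumptions.)

import datetime
from azure.batch import models as _models

# Enable autoscale: the formula and the evaluation interval travel in one options model.
enable = _models.BatchPoolAutoScaleEnableOptions(
    auto_scale_formula="$TargetDedicatedNodes = 2;",
    auto_scale_evaluation_interval=datetime.timedelta(minutes=15),  # allowed range: 5 minutes to 168 hours
)
# Evaluate-only: the formula is validated and computed but not applied to the Pool.
evaluate = _models.BatchPoolAutoScaleEvaluateOptions(
    auto_scale_formula="$TargetDedicatedNodes = 2;"
)
# client.enable_pool_auto_scale("mypool", enable)             # hypothetical operation name
# result = client.evaluate_pool_auto_scale("mypool", evaluate)  # hypothetical operation name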
+ + :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two Pool IDs within an Account that differ only by case). Required. + :vartype id: str + :ivar display_name: The display name for the Pool. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar vm_size: The size of virtual machines in the Pool. All virtual machines in a Pool are the + same size. For information about available VM sizes for Pools using Images from the Virtual + Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual + Machines in Azure (`https://learn.microsoft.com/azure/virtual-machines/sizes/overview + `_). Batch supports all + Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and + STANDARD_DSV2 series). Required. + :vartype vm_size: str + :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This + property must be specified. + :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration + :ivar resize_timeout: The timeout for allocation of Compute Nodes to the Pool. This timeout + applies only to manual scaling; it has no effect when enableAutoScale is set to true. The + default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP + status code is 400 (Bad Request). + :vartype resize_timeout: ~datetime.timedelta + :ivar resource_tags: The user-specified tags associated with the pool. The user-defined tags to + be associated with the Azure Batch Pool. When specified, these tags are propagated to the + backing Azure resources associated with the pool. This property can only be specified when the + Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + :vartype resource_tags: dict[str, str] + :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. This + property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to + false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :vartype target_dedicated_nodes: int + :ivar target_low_priority_nodes: The desired number of Spot/Low-priority Compute Nodes in the + Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale + is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or + both. + :vartype target_low_priority_nodes: int + :ivar enable_auto_scale: Whether the Pool size should automatically adjust over time. If false, + at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula property is required and the Pool automatically resizes according to the + formula. The default value is false. + :vartype enable_auto_scale: bool + :ivar auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool. This + property must not be specified if enableAutoScale is set to false. It is required if + enableAutoScale is set to true. The formula is checked for validity before the Pool is created. 
+ If the formula is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see 'Automatically scale + Compute Nodes in an Azure Batch Pool' + (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling + `_). + :vartype auto_scale_formula: str :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 @@ -5094,7 +5577,7 @@ class BatchPoolCreateContent(_model_base.Model): :vartype user_accounts: list[~azure.batch.models.UserAccount] :ivar metadata: A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] :ivar mount_configuration: Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. :vartype mount_configuration: list[~azure.batch.models.MountConfiguration] @@ -5234,7 +5717,7 @@ class BatchPoolCreateContent(_model_base.Model): name="userAccounts", visibility=["read", "create", "update", "delete", "query"] ) """The list of user Accounts to be created on each Compute Node in the Pool.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Pool as metadata. The Batch service does not @@ -5277,7 +5760,7 @@ def __init__( task_slots_per_node: Optional[int] = None, task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = None, user_accounts: Optional[List["_models.UserAccount"]] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, mount_configuration: Optional[List["_models.MountConfiguration"]] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, upgrade_policy: Optional["_models.UpgradePolicy"] = None, @@ -5294,69 +5777,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolEnableAutoScaleContent(_model_base.Model): - """Parameters for enabling automatic scaling on an Azure Batch Pool. - - :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The - default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; if you are calling the REST - API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then - the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation - schedule will be started, with its starting time being the time when this request was issued. - :vartype auto_scale_formula: str - :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the - Pool size according to the autoscale formula. The default value is 15 minutes. 
The minimum and - maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the HTTP status code is 400 - (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule - will be stopped and a new autoscale evaluation schedule will be started, with its starting time - being the time when this request was issued. - :vartype auto_scale_evaluation_interval: ~datetime.timedelta - """ - - auto_scale_formula: Optional[str] = rest_field( - name="autoScaleFormula", visibility=["read", "create", "update", "delete", "query"] - ) - """The formula for the desired number of Compute Nodes in the Pool. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify - a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request - with an invalid property value error; if you are calling the REST API directly, the HTTP status - code is 400 (Bad Request). If you specify a new interval, then the existing autoscale - evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, - with its starting time being the time when this request was issued.""" - auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field( - name="autoScaleEvaluationInterval", visibility=["read", "create", "update", "delete", "query"] - ) - """The time interval at which to automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 - hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the - Batch service rejects the request with an invalid property value error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, - then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation - schedule will be started, with its starting time being the time when this request was issued.""" - - @overload - def __init__( - self, - *, - auto_scale_formula: Optional[str] = None, - auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class BatchPoolEndpointConfiguration(_model_base.Model): +class BatchPoolEndpointConfiguration(_Model): """The endpoint configuration for a Pool. :ivar inbound_nat_pools: A list of inbound NAT Pools that can be used to address specific ports @@ -5364,10 +5785,10 @@ class BatchPoolEndpointConfiguration(_model_base.Model): Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. Required. 
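(Editor's sketch, Python: wiring an inbound NAT pool into the endpoint configuration shown above. Only the list type `BatchInboundNatPool` is visible in this hunk; its keyword arguments below are assumptions based on the REST wire names.)

from azure.batch import models as _models

endpoint_config = _models.BatchPoolEndpointConfiguration(
    inbound_nat_pools=[  # at most 5 per Batch Pool, per the docstring above
        _models.BatchInboundNatPool(  # field names assumed from the REST shape
            name="ssh",
            protocol="tcp",
            backend_port=22,
            frontend_port_range_start=15000,
            frontend_port_range_end=15100,
        )
    ]
)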
- :vartype inbound_nat_pools: list[~azure.batch.models.InboundNatPool] + :vartype inbound_nat_pools: list[~azure.batch.models.BatchInboundNatPool] """ - inbound_nat_pools: List["_models.InboundNatPool"] = rest_field( + inbound_nat_pools: List["_models.BatchInboundNatPool"] = rest_field( name="inboundNATPools", visibility=["read", "create", "update", "delete", "query"] ) """A list of inbound NAT Pools that can be used to address specific ports on an individual Compute @@ -5379,47 +5800,7 @@ class BatchPoolEndpointConfiguration(_model_base.Model): def __init__( self, *, - inbound_nat_pools: List["_models.InboundNatPool"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class BatchPoolEvaluateAutoScaleContent(_model_base.Model): - """Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. - - :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The - formula is validated and its results calculated, but it is not applied to the Pool. To apply - the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about - specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling - `_). Required. - :vartype auto_scale_formula: str - """ - - auto_scale_formula: str = rest_field( - name="autoScaleFormula", visibility=["read", "create", "update", "delete", "query"] - ) - """The formula for the desired number of Compute Nodes in the Pool. The formula is validated and - its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, - 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see - Automatically scale Compute Nodes in an Azure Batch Pool - (`https://learn.microsoft.com/azure/batch/batch-automatic-scaling - `_). Required.""" - - @overload - def __init__( - self, - *, - auto_scale_formula: str, + inbound_nat_pools: List["_models.BatchInboundNatPool"], ) -> None: ... @overload @@ -5433,7 +5814,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolIdentity(_model_base.Model): +class BatchPoolIdentity(_Model): """The identity of the Batch pool, if configured. :ivar type: The identity of the Batch pool, if configured. The list of user identities @@ -5445,7 +5826,7 @@ class BatchPoolIdentity(_model_base.Model): :ivar user_assigned_identities: The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - :vartype user_assigned_identities: list[~azure.batch.models.UserAssignedIdentity] + :vartype user_assigned_identities: list[~azure.batch.models.BatchUserAssignedIdentity] """ type: Union[str, "_models.BatchPoolIdentityType"] = rest_field( @@ -5455,7 +5836,7 @@ class BatchPoolIdentity(_model_base.Model): Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. Required. 
Known values are: \"UserAssigned\" and \"None\".""" - user_assigned_identities: Optional[List["_models.UserAssignedIdentity"]] = rest_field( + user_assigned_identities: Optional[List["_models.BatchUserAssignedIdentity"]] = rest_field( name="userAssignedIdentities", visibility=["read", "create", "update", "delete", "query"] ) """The list of user identities associated with the Batch account. The user identity dictionary key @@ -5467,7 +5848,7 @@ def __init__( self, *, type: Union[str, "_models.BatchPoolIdentityType"], - user_assigned_identities: Optional[List["_models.UserAssignedIdentity"]] = None, + user_assigned_identities: Optional[List["_models.BatchUserAssignedIdentity"]] = None, ) -> None: ... @overload @@ -5481,7 +5862,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolInfo(_model_base.Model): +class BatchPoolInfo(_Model): """Specifies how a Job should be assigned to a Pool. :ivar pool_id: The ID of an existing Pool. All the Tasks of the Job will run on the specified @@ -5538,7 +5919,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolNodeCounts(_model_base.Model): +class BatchPoolNodeCounts(_Model): """The number of Compute Nodes in each state for a Pool. :ivar pool_id: The ID of the Pool. Required. @@ -5580,7 +5961,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolReplaceContent(_model_base.Model): +class BatchPoolReplaceOptions(_Model): """Parameters for replacing properties on an Azure Batch Pool. :ivar start_task: A Task to run on each Compute Node as it joins the Pool. The Task runs when @@ -5617,7 +5998,7 @@ class BatchPoolReplaceContent(_model_base.Model): :ivar metadata: A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. Required. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] :ivar target_node_communication_mode: The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. Known values are: "default", "classic", and "simplified". @@ -5657,7 +6038,7 @@ class BatchPoolReplaceContent(_model_base.Model): 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. Required.""" - metadata: List["_models.MetadataItem"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + metadata: List["_models.BatchMetadataItem"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. 
Required.""" @@ -5674,7 +6055,7 @@ def __init__( *, certificate_references: List["_models.BatchCertificateReference"], application_package_references: List["_models.BatchApplicationPackageReference"], - metadata: List["_models.MetadataItem"], + metadata: List["_models.BatchMetadataItem"], start_task: Optional["_models.BatchStartTask"] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, ) -> None: ... @@ -5690,7 +6071,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolResizeContent(_model_base.Model): +class BatchPoolResizeOptions(_Model): """Parameters for changing the size of an Azure Batch Pool. :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. @@ -5752,7 +6133,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolResourceStatistics(_model_base.Model): +class BatchPoolResourceStatistics(_Model): """Statistics related to resource consumption by Compute Nodes in a Pool. :ivar start_time: The start time of the time range covered by the statistics. Required. @@ -5775,12 +6156,12 @@ class BatchPoolResourceStatistics(_model_base.Model): :ivar peak_disk_gi_b: The peak used disk space in GiB across all Compute Nodes in the Pool. Required. :vartype peak_disk_gi_b: float - :ivar disk_read_i_ops: The total number of disk read operations across all Compute Nodes in the + :ivar disk_read_iops: The total number of disk read operations across all Compute Nodes in the Pool. Required. - :vartype disk_read_i_ops: int - :ivar disk_write_i_ops: The total number of disk write operations across all Compute Nodes in + :vartype disk_read_iops: int + :ivar disk_write_iops: The total number of disk write operations across all Compute Nodes in the Pool. Required. - :vartype disk_write_i_ops: int + :vartype disk_write_iops: int :ivar disk_read_gi_b: The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. Required. :vartype disk_read_gi_b: float @@ -5818,11 +6199,11 @@ class BatchPoolResourceStatistics(_model_base.Model): """The average used disk space in GiB across all Compute Nodes in the Pool. Required.""" peak_disk_gi_b: float = rest_field(name="peakDiskGiB", visibility=["read", "create", "update", "delete", "query"]) """The peak used disk space in GiB across all Compute Nodes in the Pool. Required.""" - disk_read_i_ops: int = rest_field( + disk_read_iops: int = rest_field( name="diskReadIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk read operations across all Compute Nodes in the Pool. Required.""" - disk_write_i_ops: int = rest_field( + disk_write_iops: int = rest_field( name="diskWriteIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations across all Compute Nodes in the Pool. Required.""" @@ -5852,8 +6233,8 @@ def __init__( peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, - disk_read_i_ops: int, - disk_write_i_ops: int, + disk_read_iops: int, + disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, @@ -5871,7 +6252,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolSpecification(_model_base.Model): +class BatchPoolSpecification(_Model): """Specification for creating a new Pool. 
:ivar display_name: The display name for the Pool. The display name need not be unique and can @@ -5963,7 +6344,7 @@ class BatchPoolSpecification(_model_base.Model): :vartype user_accounts: list[~azure.batch.models.UserAccount] :ivar metadata: A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] :ivar mount_configuration: A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. :vartype mount_configuration: list[~azure.batch.models.MountConfiguration] @@ -6092,7 +6473,7 @@ class BatchPoolSpecification(_model_base.Model): name="userAccounts", visibility=["read", "create", "update", "delete", "query"] ) """The list of user Accounts to be created on each Compute Node in the Pool.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Pool as metadata. The Batch service does not @@ -6134,7 +6515,7 @@ def __init__( certificate_references: Optional[List["_models.BatchCertificateReference"]] = None, application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, user_accounts: Optional[List["_models.UserAccount"]] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, mount_configuration: Optional[List["_models.MountConfiguration"]] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, upgrade_policy: Optional["_models.UpgradePolicy"] = None, @@ -6151,7 +6532,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolStatistics(_model_base.Model): +class BatchPoolStatistics(_Model): """Contains utilization and resource usage statistics for the lifetime of a Pool. :ivar url: The URL for the statistics. Required. @@ -6161,10 +6542,11 @@ class BatchPoolStatistics(_model_base.Model): :ivar last_update_time: The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. Required. :vartype last_update_time: ~datetime.datetime - :ivar usage_stats: Statistics related to Pool usage, such as the amount of core-time used. - :vartype usage_stats: ~azure.batch.models.BatchPoolUsageStatistics - :ivar resource_stats: Statistics related to resource consumption by Compute Nodes in the Pool. - :vartype resource_stats: ~azure.batch.models.BatchPoolResourceStatistics + :ivar usage_statistics: Statistics related to Pool usage, such as the amount of core-time used. + :vartype usage_statistics: ~azure.batch.models.BatchPoolUsageStatistics + :ivar resource_statistics: Statistics related to resource consumption by Compute Nodes in the + Pool. + :vartype resource_statistics: ~azure.batch.models.BatchPoolResourceStatistics """ url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -6178,11 +6560,11 @@ class BatchPoolStatistics(_model_base.Model): ) """The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. 
Required.""" - usage_stats: Optional["_models.BatchPoolUsageStatistics"] = rest_field( + usage_statistics: Optional["_models.BatchPoolUsageStatistics"] = rest_field( name="usageStats", visibility=["read", "create", "update", "delete", "query"] ) """Statistics related to Pool usage, such as the amount of core-time used.""" - resource_stats: Optional["_models.BatchPoolResourceStatistics"] = rest_field( + resource_statistics: Optional["_models.BatchPoolResourceStatistics"] = rest_field( name="resourceStats", visibility=["read", "create", "update", "delete", "query"] ) """Statistics related to resource consumption by Compute Nodes in the Pool.""" @@ -6194,8 +6576,8 @@ def __init__( url: str, start_time: datetime.datetime, last_update_time: datetime.datetime, - usage_stats: Optional["_models.BatchPoolUsageStatistics"] = None, - resource_stats: Optional["_models.BatchPoolResourceStatistics"] = None, + usage_statistics: Optional["_models.BatchPoolUsageStatistics"] = None, + resource_statistics: Optional["_models.BatchPoolResourceStatistics"] = None, ) -> None: ... @overload @@ -6209,7 +6591,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolUpdateContent(_model_base.Model): +class BatchPoolUpdateOptions(_Model): """Parameters for updating an Azure Batch Pool. :ivar display_name: The display name for the Pool. The display name need not be unique and can @@ -6259,7 +6641,7 @@ class BatchPoolUpdateContent(_model_base.Model): element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. - :vartype metadata: list[~azure.batch.models.MetadataItem] + :vartype metadata: list[~azure.batch.models.BatchMetadataItem] :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This property must be specified.

This field can be updated only when the pool is empty. :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration @@ -6348,7 +6730,7 @@ class BatchPoolUpdateContent(_model_base.Model): replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged.""" - metadata: Optional[List["_models.MetadataItem"]] = rest_field( + metadata: Optional[List["_models.BatchMetadataItem"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs associated with the Pool as metadata. If this element is present, it @@ -6416,7 +6798,7 @@ def __init__( start_task: Optional["_models.BatchStartTask"] = None, certificate_references: Optional[List["_models.BatchCertificateReference"]] = None, application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, - metadata: Optional[List["_models.MetadataItem"]] = None, + metadata: Optional[List["_models.BatchMetadataItem"]] = None, virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, task_slots_per_node: Optional[int] = None, @@ -6439,7 +6821,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolUsageMetrics(_model_base.Model): +class BatchPoolUsageMetrics(_Model): """Usage metrics for a Pool across an aggregation interval. :ivar pool_id: The ID of the Pool whose metrics are aggregated in this entry. Required. @@ -6501,7 +6883,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolUsageStatistics(_model_base.Model): +class BatchPoolUsageStatistics(_Model): """Statistics related to Pool usage information. :ivar start_time: The start time of the time range covered by the statistics. Required. @@ -6549,9 +6931,59 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchStartTask(_model_base.Model): - """Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an +class BatchPublicIpAddressConfiguration(_Model): + """The public IP Address configuration of the networking configuration of a Pool. + + :ivar ip_address_provisioning_type: The provisioning type for Public IP Addresses for the Pool. + The default value is BatchManaged. Known values are: "batchmanaged", "usermanaged", and + "nopublicipaddresses". + :vartype ip_address_provisioning_type: str or ~azure.batch.models.IpAddressProvisioningType + :ivar ip_address_ids: The list of public IPs which the Batch service will use when provisioning + Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 + dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For + example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each + element of this collection is of the form: + /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. 
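(Editor's sketch, Python: the new public IP configuration model defined above, sized per its own documentation. Each public IP covers up to 100 dedicated or 100 Spot/Low-priority nodes, so a 250-node pool needs at least three. The resource IDs are placeholders in the documented form.)

from azure.batch import models as _models

public_ips = _models.BatchPublicIpAddressConfiguration(
    ip_address_provisioning_type="usermanaged",  # or "batchmanaged" / "nopublicipaddresses"
    ip_address_ids=[
        "/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/pool-ip-1",
        "/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/pool-ip-2",
        "/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/pool-ip-3",
    ],
)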
+ :vartype ip_address_ids: list[str] + """ + + ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = rest_field( + name="provision", visibility=["read", "create", "update", "delete", "query"] + ) + """The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + Known values are: \"batchmanaged\", \"usermanaged\", and \"nopublicipaddresses\".""" + ip_address_ids: Optional[List[str]] = rest_field( + name="ipAddressIds", visibility=["read", "create", "update", "delete", "query"] + ) + """The list of public IPs which the Batch service will use when provisioning Compute Nodes. The + number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 + Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 + dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of + the form: + /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.""" + + @overload + def __init__( + self, + *, + ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = None, + ip_address_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchStartTask(_Model): + """Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host failure. Retries due to recovery operations are independent of and are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal @@ -6700,7 +7132,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchStartTaskInfo(_model_base.Model): +class BatchStartTaskInfo(_Model): """Information about a StartTask running on a Compute Node. :ivar state: The state of the StartTask on the Compute Node. Required. Known values are: @@ -6825,7 +7257,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchSubtask(_model_base.Model): +class BatchSubtask(_Model): """Information about an Azure Batch subtask. :ivar id: The ID of the subtask. @@ -6957,7 +7389,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchSupportedImage(_model_base.Model): +class BatchSupportedImage(_Model): """A reference to the Azure Virtual Machines Marketplace Image and additional information about the Image. @@ -6966,7 +7398,7 @@ class BatchSupportedImage(_model_base.Model): :vartype node_agent_sku_id: str :ivar image_reference: The reference to the Azure Virtual Machine's Marketplace Image. Required. - :vartype image_reference: ~azure.batch.models.ImageReference + :vartype image_reference: ~azure.batch.models.BatchVmImageReference :ivar os_type: The type of operating system (e.g. Windows or Linux) of the Image. Required. Known values are: "linux" and "windows". 
:vartype os_type: str or ~azure.batch.models.OSType @@ -6987,7 +7419,7 @@ class BatchSupportedImage(_model_base.Model): name="nodeAgentSKUId", visibility=["read", "create", "update", "delete", "query"] ) """The ID of the Compute Node agent SKU which the Image supports. Required.""" - image_reference: "_models.ImageReference" = rest_field( + image_reference: "_models.BatchVmImageReference" = rest_field( name="imageReference", visibility=["read", "create", "update", "delete", "query"] ) """The reference to the Azure Virtual Machine's Marketplace Image. Required.""" @@ -7015,7 +7447,7 @@ def __init__( self, *, node_agent_sku_id: str, - image_reference: "_models.ImageReference", + image_reference: "_models.BatchVmImageReference", os_type: Union[str, "_models.OSType"], verification_type: Union[str, "_models.ImageVerificationType"], capabilities: Optional[List[str]] = None, @@ -7033,7 +7465,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTask(_model_base.Model): +class BatchTask(_Model): """Batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host failure. @@ -7113,7 +7545,7 @@ class BatchTask(_model_base.Model): :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] :ivar affinity_info: A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. - :vartype affinity_info: ~azure.batch.models.AffinityInfo + :vartype affinity_info: ~azure.batch.models.BatchAffinityInfo :ivar constraints: The execution constraints that apply to this Task. :vartype constraints: ~azure.batch.models.BatchTaskConstraints :ivar required_slots: The number of scheduling slots that the Task requires to run. The default @@ -7130,8 +7562,8 @@ class BatchTask(_model_base.Model): :ivar multi_instance_settings: An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. :vartype multi_instance_settings: ~azure.batch.models.MultiInstanceSettings - :ivar stats: Resource usage statistics for the Task. - :vartype stats: ~azure.batch.models.BatchTaskStatistics + :ivar task_statistics: Resource usage statistics for the Task. + :vartype task_statistics: ~azure.batch.models.BatchTaskStatistics :ivar depends_on: The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. 
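(Editor's sketch, Python: reading the renamed BatchTask fields from a retrieved task. `client.get_task` and the `affinity_id` attribute are assumptions; only the BatchTask model itself appears in this hunk.)

task = client.get_task("job-1", "task-1")  # hypothetical client call
# `task_statistics` (wire name "stats") is populated only when the task is
# retrieved with an expand clause including 'stats'; guard against None.
if task.task_statistics is not None:
    print("task wall clock:", task.task_statistics.wall_clock_time)
if task.affinity_info is not None:
    print("affinity id:", task.affinity_info.affinity_id)  # attribute name assumed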
@@ -7230,7 +7662,7 @@ class BatchTask(_model_base.Model): name="environmentSettings", visibility=["read"] ) """A list of environment variable settings for the Task.""" - affinity_info: Optional["_models.AffinityInfo"] = rest_field(name="affinityInfo", visibility=["read"]) + affinity_info: Optional["_models.BatchAffinityInfo"] = rest_field(name="affinityInfo", visibility=["read"]) """A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task.""" constraints: Optional["_models.BatchTaskConstraints"] = rest_field( @@ -7253,7 +7685,7 @@ class BatchTask(_model_base.Model): ) """An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task.""" - stats: Optional["_models.BatchTaskStatistics"] = rest_field(visibility=["read"]) + task_statistics: Optional["_models.BatchTaskStatistics"] = rest_field(name="stats", visibility=["read"]) """Resource usage statistics for the Task.""" depends_on: Optional["_models.BatchTaskDependencies"] = rest_field(name="dependsOn", visibility=["read"]) """The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it @@ -7297,102 +7729,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskAddCollectionResult(_model_base.Model): - """The result of adding a collection of Tasks to a Job. - - :ivar value: The results of the add Task collection operation. - :vartype value: list[~azure.batch.models.BatchTaskAddResult] - """ - - value: Optional[List["_models.BatchTaskAddResult"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The results of the add Task collection operation.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchTaskAddResult"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class BatchTaskAddResult(_model_base.Model): - """Result for a single Task added as part of an add Task collection operation. - - :ivar status: The status of the add Task request. Required. Known values are: "success", - "clienterror", and "servererror". - :vartype status: str or ~azure.batch.models.BatchTaskAddStatus - :ivar task_id: The ID of the Task for which this is the result. Required. - :vartype task_id: str - :ivar e_tag: The ETag of the Task, if the Task was successfully added. You can use this to - detect whether the Task has changed between requests. In particular, you can be pass the ETag - with an Update Task request to specify that your changes should take effect only if nobody else - has modified the Job in the meantime. - :vartype e_tag: str - :ivar last_modified: The last modified time of the Task. - :vartype last_modified: ~datetime.datetime - :ivar location: The URL of the Task, if the Task was successfully added. - :vartype location: str - :ivar error: The error encountered while attempting to add the Task. - :vartype error: ~azure.batch.models.BatchError - """ - - status: Union[str, "_models.BatchTaskAddStatus"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The status of the add Task request. Required. 
Known values are: \"success\", \"clienterror\", - and \"servererror\".""" - task_id: str = rest_field(name="taskId", visibility=["read", "create", "update", "delete", "query"]) - """The ID of the Task for which this is the result. Required.""" - e_tag: Optional[str] = rest_field(name="eTag", visibility=["read", "create", "update", "delete", "query"]) - """The ETag of the Task, if the Task was successfully added. You can use this to detect whether - the Task has changed between requests. In particular, you can be pass the ETag with an Update - Task request to specify that your changes should take effect only if nobody else has modified - the Job in the meantime.""" - last_modified: Optional[datetime.datetime] = rest_field( - name="lastModified", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" - ) - """The last modified time of the Task.""" - location: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The URL of the Task, if the Task was successfully added.""" - error: Optional["_models.BatchError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The error encountered while attempting to add the Task.""" - - @overload - def __init__( - self, - *, - status: Union[str, "_models.BatchTaskAddStatus"], - task_id: str, - e_tag: Optional[str] = None, - last_modified: Optional[datetime.datetime] = None, - location: Optional[str] = None, - error: Optional["_models.BatchError"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class BatchTaskConstraints(_model_base.Model): +class BatchTaskConstraints(_Model): """Execution constraints to apply to a Task. :ivar max_wall_clock_time: The maximum elapsed time that the Task may run, measured from the @@ -7460,7 +7797,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskContainerExecutionInfo(_model_base.Model): +class BatchTaskContainerExecutionInfo(_Model): """Contains information about the container which a Task is executing. :ivar container_id: The ID of the container. @@ -7506,7 +7843,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskContainerSettings(_model_base.Model): +class BatchTaskContainerSettings(_Model): """The container settings for a Task. :ivar container_run_options: Additional options to the container create command. These @@ -7580,7 +7917,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCounts(_model_base.Model): +class BatchTaskCounts(_Model): """The Task counts for a Job. :ivar active: The number of Tasks in the active state. Required. @@ -7632,7 +7969,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCountsResult(_model_base.Model): +class BatchTaskCountsResult(_Model): """The Task and TaskSlot counts for a Job. :ivar task_counts: The number of Tasks per state. Required. @@ -7669,7 +8006,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCreateContent(_model_base.Model): +class BatchTaskCreateOptions(_Model): """Parameters for creating an Azure Batch Task. 
:ivar id: A string that uniquely identifies the Task within the Job. The ID can contain any
@@ -7720,7 +8057,7 @@ class BatchTaskCreateContent(_model_base.Model):
     :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting]
     :ivar affinity_info: A locality hint that can be used by the Batch service to select a Compute
      Node on which to start the new Task.
-    :vartype affinity_info: ~azure.batch.models.AffinityInfo
+    :vartype affinity_info: ~azure.batch.models.BatchAffinityInfo
     :ivar constraints: The execution constraints that apply to this Task. If you do not specify
      constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the
      maxWallClockTime is infinite, and the retentionTime is 7 days.
@@ -7817,7 +8154,7 @@ class BatchTaskCreateContent(_model_base.Model):
         name="environmentSettings", visibility=["read", "create", "update", "delete", "query"]
     )
     """A list of environment variable settings for the Task."""
-    affinity_info: Optional["_models.AffinityInfo"] = rest_field(
+    affinity_info: Optional["_models.BatchAffinityInfo"] = rest_field(
         name="affinityInfo", visibility=["read", "create", "update", "delete", "query"]
     )
     """A locality hint that can be used by the Batch service to select a Compute Node on which to
@@ -7884,7 +8221,7 @@ def __init__(
         resource_files: Optional[List["_models.ResourceFile"]] = None,
         output_files: Optional[List["_models.OutputFile"]] = None,
         environment_settings: Optional[List["_models.EnvironmentSetting"]] = None,
-        affinity_info: Optional["_models.AffinityInfo"] = None,
+        affinity_info: Optional["_models.BatchAffinityInfo"] = None,
         constraints: Optional["_models.BatchTaskConstraints"] = None,
         required_slots: Optional[int] = None,
         user_identity: Optional["_models.UserIdentity"] = None,
@@ -7905,7 +8242,72 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
     super().__init__(*args, **kwargs)
 
 
-class BatchTaskDependencies(_model_base.Model):
+class BatchTaskCreateResult(_Model):
+    """Result for a single Task created as part of an add Task collection operation.
+
+    :ivar status: The status of the add Task request. Required. Known values are: "success",
+     "clienterror", and "servererror".
+    :vartype status: str or ~azure.batch.models.BatchTaskAddStatus
+    :ivar task_id: The ID of the Task for which this is the result. Required.
+    :vartype task_id: str
+    :ivar e_tag: The ETag of the Task, if the Task was successfully added. You can use this to
+     detect whether the Task has changed between requests. In particular, you can pass the ETag
+     with an Update Task request to specify that your changes should take effect only if nobody else
+     has modified the Job in the meantime.
+    :vartype e_tag: str
+    :ivar last_modified: The last modified time of the Task.
+    :vartype last_modified: ~datetime.datetime
+    :ivar location: The URL of the Task, if the Task was successfully added.
+    :vartype location: str
+    :ivar error: The error encountered while attempting to add the Task.
+    :vartype error: ~azure.batch.models.BatchError
+    """
+
+    status: Union[str, "_models.BatchTaskAddStatus"] = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The status of the add Task request. Required. Known values are: \"success\", \"clienterror\",
+     and \"servererror\"."""
+    task_id: str = rest_field(name="taskId", visibility=["read", "create", "update", "delete", "query"])
+    """The ID of the Task for which this is the result.
Required."""
+    e_tag: Optional[str] = rest_field(name="eTag", visibility=["read", "create", "update", "delete", "query"])
+    """The ETag of the Task, if the Task was successfully added. You can use this to detect whether
+     the Task has changed between requests. In particular, you can pass the ETag with an Update
+     Task request to specify that your changes should take effect only if nobody else has modified
+     the Job in the meantime."""
+    last_modified: Optional[datetime.datetime] = rest_field(
+        name="lastModified", visibility=["read", "create", "update", "delete", "query"], format="rfc3339"
+    )
+    """The last modified time of the Task."""
+    location: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The URL of the Task, if the Task was successfully added."""
+    error: Optional["_models.BatchError"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The error encountered while attempting to add the Task."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        status: Union[str, "_models.BatchTaskAddStatus"],
+        task_id: str,
+        e_tag: Optional[str] = None,
+        last_modified: Optional[datetime.datetime] = None,
+        location: Optional[str] = None,
+        error: Optional["_models.BatchError"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class BatchTaskDependencies(_Model):
     """Specifies any dependencies of a Task. Any Task that is explicitly specified or within a
     dependency range must complete before the dependant Task will be scheduled.
 
@@ -7954,7 +8356,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
     super().__init__(*args, **kwargs)
 
 
-class BatchTaskExecutionInfo(_model_base.Model):
+class BatchTaskExecutionInfo(_Model):
     """Information about the execution of a Task.
 
     :ivar start_time: The time at which the Task started running. 'Running' corresponds to the
@@ -8094,12 +8496,12 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
     super().__init__(*args, **kwargs)
 
 
-class BatchTaskFailureInfo(_model_base.Model):
+class BatchTaskFailureInfo(_Model):
     """Information about a Task failure.
 
     :ivar category: The category of the Task error. Required. Known values are: "usererror" and
      "servererror".
-    :vartype category: str or ~azure.batch.models.ErrorCategory
+    :vartype category: str or ~azure.batch.models.BatchErrorSourceCategory
     :ivar code: An identifier for the Task error. Codes are invariant and are intended to be
      consumed programmatically.
     :vartype code: str
     :ivar message: A message describing the Task error, intended to be suitable for display in a
      user interface.
     :vartype message: str
     :ivar details: A list of additional details related to the error.
     :vartype details: list[~azure.batch.models.NameValuePair]
     """
 
-    category: Union[str, "_models.ErrorCategory"] = rest_field(
+    category: Union[str, "_models.BatchErrorSourceCategory"] = rest_field(
         visibility=["read", "create", "update", "delete", "query"]
     )
     """The category of the Task error. Required.
Known values are: \"usererror\" and \"servererror\".""" @@ -8128,7 +8530,7 @@ class BatchTaskFailureInfo(_model_base.Model): def __init__( self, *, - category: Union[str, "_models.ErrorCategory"], + category: Union[str, "_models.BatchErrorSourceCategory"], code: Optional[str] = None, message: Optional[str] = None, details: Optional[List["_models.NameValuePair"]] = None, @@ -8145,18 +8547,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskGroup(_model_base.Model): +class BatchTaskGroup(_Model): """A collection of Azure Batch Tasks to add. - :ivar value: The collection of Tasks to add. The maximum count of Tasks is 100. The total - serialized size of this collection must be less than 1MB. If it is greater than 1MB (for + :ivar values_property: The collection of Tasks to add. The maximum count of Tasks is 100. The + total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. Required. - :vartype value: list[~azure.batch.models.BatchTaskCreateContent] + :vartype values_property: list[~azure.batch.models.BatchTaskCreateOptions] """ - value: List["_models.BatchTaskCreateContent"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] + values_property: List["_models.BatchTaskCreateOptions"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] ) """The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has @@ -8167,7 +8569,7 @@ class BatchTaskGroup(_model_base.Model): def __init__( self, *, - value: List["_models.BatchTaskCreateContent"], + values_property: List["_models.BatchTaskCreateOptions"], ) -> None: ... @overload @@ -8181,7 +8583,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskIdRange(_model_base.Model): +class BatchTaskIdRange(_Model): """The start and end of the range are inclusive. For example, if a range has start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. @@ -8215,7 +8617,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskInfo(_model_base.Model): +class BatchTaskInfo(_Model): """Information about a Task running on a Compute Node. :ivar task_url: The URL of the Task. @@ -8274,7 +8676,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskSchedulingPolicy(_model_base.Model): +class BatchTaskSchedulingPolicy(_Model): """Specifies how Tasks should be distributed across Compute Nodes. :ivar node_fill_type: How Tasks are distributed across Compute Nodes in a Pool. If not @@ -8306,7 +8708,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskSlotCounts(_model_base.Model): +class BatchTaskSlotCounts(_Model): """The TaskSlot counts for a Job. :ivar active: The number of TaskSlots for active Tasks. Required. @@ -8354,7 +8756,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskStatistics(_model_base.Model): +class BatchTaskStatistics(_Model): """Resource usage statistics for a Task. :ivar url: The URL of the statistics. Required. 
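(Editor's sketch, Python: chunking task submissions to respect the BatchTaskGroup limits documented above, at most 100 tasks per call and under 1MB serialized. The `values_property` keyword matches the model in this hunk; the `create_task_collection` operation name is an assumption.)

from azure.batch import models as _models

def add_tasks_in_chunks(client, job_id, tasks, chunk_size=100):
    # Submit at most `chunk_size` BatchTaskCreateOptions per request; per the
    # docstring above, oversized payloads fail with 'RequestBodyTooLarge' and
    # should be retried with fewer tasks.
    for start in range(0, len(tasks), chunk_size):
        group = _models.BatchTaskGroup(values_property=tasks[start:start + chunk_size])
        client.create_task_collection(job_id, group)  # hypothetical operation name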
@@ -8375,10 +8777,10 @@ class BatchTaskStatistics(_model_base.Model): the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. Required. :vartype wall_clock_time: ~datetime.timedelta - :ivar read_i_ops: The total number of disk read operations made by the Task. Required. - :vartype read_i_ops: int - :ivar write_i_ops: The total number of disk write operations made by the Task. Required. - :vartype write_i_ops: int + :ivar read_iops: The total number of disk read operations made by the Task. Required. + :vartype read_iops: int + :ivar write_iops: The total number of disk write operations made by the Task. Required. + :vartype write_iops: int :ivar read_io_gi_b: The total gibibytes read from disk by the Task. Required. :vartype read_io_gi_b: float :ivar write_io_gi_b: The total gibibytes written to disk by the Task. Required. @@ -8418,11 +8820,11 @@ class BatchTaskStatistics(_model_base.Model): Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. Required.""" - read_i_ops: int = rest_field( + read_iops: int = rest_field( name="readIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk read operations made by the Task. Required.""" - write_i_ops: int = rest_field( + write_iops: int = rest_field( name="writeIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations made by the Task. Required.""" @@ -8447,8 +8849,8 @@ def __init__( user_cpu_time: datetime.timedelta, kernel_cpu_time: datetime.timedelta, wall_clock_time: datetime.timedelta, - read_i_ops: int, - write_i_ops: int, + read_iops: int, + write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time: datetime.timedelta, @@ -8465,51 +8867,32 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class CifsMountConfiguration(_model_base.Model): - """Information used to connect to a CIFS file system. +class BatchUefiSettings(_Model): + """Specifies the security settings like secure boot and vTPM used while creating the virtual + machine. - :ivar username: The user to use for authentication against the CIFS file system. Required. - :vartype username: str - :ivar source: The URI of the file system to mount. Required. - :vartype source: str - :ivar relative_mount_path: The relative path on the compute node where the file system will be - mounted. All file systems are mounted relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. - :vartype relative_mount_path: str - :ivar mount_options: Additional command line options to pass to the mount command. These are - 'net use' options in Windows and 'mount' options in Linux. - :vartype mount_options: str - :ivar password: The password to use for authentication against the CIFS file system. Required. - :vartype password: str + :ivar secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual + machine. + :vartype secure_boot_enabled: bool + :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. 
+ :vartype v_tpm_enabled: bool """ - username: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The user to use for authentication against the CIFS file system. Required.""" - source: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The URI of the file system to mount. Required.""" - relative_mount_path: str = rest_field( - name="relativeMountPath", visibility=["read", "create", "update", "delete", "query"] + secure_boot_enabled: Optional[bool] = rest_field( + name="secureBootEnabled", visibility=["read", "create", "update", "delete", "query"] ) - """The relative path on the compute node where the file system will be mounted. All file systems - are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required.""" - mount_options: Optional[str] = rest_field( - name="mountOptions", visibility=["read", "create", "update", "delete", "query"] + """Specifies whether secure boot should be enabled on the virtual machine.""" + v_tpm_enabled: Optional[bool] = rest_field( + name="vTpmEnabled", visibility=["read", "create", "update", "delete", "query"] ) - """Additional command line options to pass to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux.""" - password: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The password to use for authentication against the CIFS file system. Required.""" + """Specifies whether vTPM should be enabled on the virtual machine.""" @overload def __init__( self, *, - username: str, - source: str, - relative_mount_path: str, - password: str, - mount_options: Optional[str] = None, + secure_boot_enabled: Optional[bool] = None, + v_tpm_enabled: Optional[bool] = None, ) -> None: ... @overload @@ -8523,45 +8906,196 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ContainerConfiguration(_model_base.Model): - """The configuration for container-enabled Pools. +class BatchUserAssignedIdentity(_Model): + """The user assigned Identity. - :ivar type: The container technology to be used. Required. Known values are: "dockerCompatible" - and "criCompatible". - :vartype type: str or ~azure.batch.models.ContainerType - :ivar container_image_names: The collection of container Image names. This is the full Image - reference, as would be specified to "docker pull". An Image will be sourced from the default - Docker registry unless the Image is fully qualified with an alternative registry. - :vartype container_image_names: list[str] - :ivar container_registries: Additional private registries from which containers can be pulled. - If any Images must be downloaded from a private registry which requires credentials, then those - credentials must be provided here. - :vartype container_registries: list[~azure.batch.models.ContainerRegistryReference] + :ivar resource_id: The ARM resource id of the user assigned identity. Required. + :vartype resource_id: str + :ivar client_id: The client id of the user assigned identity. + :vartype client_id: str + :ivar principal_id: The principal id of the user assigned identity. + :vartype principal_id: str """ - type: Union[str, "_models.ContainerType"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The container technology to be used. Required. 
Known values are: \"dockerCompatible\" and - \"criCompatible\".""" - container_image_names: Optional[List[str]] = rest_field( - name="containerImageNames", visibility=["read", "create", "update", "delete", "query"] + resource_id: str = rest_field(name="resourceId", visibility=["read", "create", "update", "delete", "query"]) + """The ARM resource id of the user assigned identity. Required.""" + client_id: Optional[str] = rest_field(name="clientId", visibility=["read"]) + """The client id of the user assigned identity.""" + principal_id: Optional[str] = rest_field(name="principalId", visibility=["read"]) + """The principal id of the user assigned identity.""" + + @overload + def __init__( + self, + *, + resource_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchVmImageReference(_Model): + """A reference to an Azure Virtual Machines Marketplace Image or a Azure Compute Gallery Image. + To get the list of all Azure Marketplace Image references verified by Azure Batch, see the + ' List Supported Images ' operation. + + :ivar publisher: The publisher of the Azure Virtual Machines Marketplace Image. For example, + Canonical or MicrosoftWindowsServer. + :vartype publisher: str + :ivar offer: The offer type of the Azure Virtual Machines Marketplace Image. For example, + UbuntuServer or WindowsServer. + :vartype offer: str + :ivar sku: The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or + 2019-Datacenter. + :vartype sku: str + :ivar version: The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' + can be specified to select the latest version of an Image. If omitted, the default is 'latest'. + :vartype version: str + :ivar virtual_machine_image_id: The ARM resource identifier of the Azure Compute Gallery Image. + Compute Nodes in the Pool will be created using this Image Id. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} + or + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} + for always defaulting to the latest image version. This property is mutually exclusive with + other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same + region and must be in the same subscription as the Azure Batch account. If the image version is + not specified in the imageId, the latest version will be used. For information about the + firewall settings for the Batch Compute Node agent to communicate with the Batch service see + `https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration + `_. + :vartype virtual_machine_image_id: str + :ivar exact_version: The specific version of the platform image or marketplace image used to + create the node. This read-only field differs from 'version' only if the value specified for + 'version' when the pool was created was 'latest'. + :vartype exact_version: str + :ivar shared_gallery_image_id: The shared gallery image unique identifier. This property is + mutually exclusive with other properties and can be fetched from shared gallery image GET call. 
+ :vartype shared_gallery_image_id: str + :ivar community_gallery_image_id: The community gallery image unique identifier. This property + is mutually exclusive with other properties and can be fetched from community gallery image GET + call. + :vartype community_gallery_image_id: str + """ + + publisher: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or + MicrosoftWindowsServer.""" + offer: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or + WindowsServer.""" + sku: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or + 2019-Datacenter.""" + version: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be + specified to select the latest version of an Image. If omitted, the default is 'latest'.""" + virtual_machine_image_id: Optional[str] = rest_field( + name="virtualMachineImageId", visibility=["read", "create", "update", "delete", "query"] ) - """The collection of container Image names. This is the full Image reference, as would be - specified to \"docker pull\". An Image will be sourced from the default Docker registry unless - the Image is fully qualified with an alternative registry.""" - container_registries: Optional[List["_models.ContainerRegistryReference"]] = rest_field( - name="containerRegistries", visibility=["read", "create", "update", "delete", "query"] + """The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will + be created using this Image Id. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} + or + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} + for always defaulting to the latest image version. This property is mutually exclusive with + other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same + region and must be in the same subscription as the Azure Batch account. If the image version is + not specified in the imageId, the latest version will be used. For information about the + firewall settings for the Batch Compute Node agent to communicate with the Batch service see + `https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration + `_.""" + exact_version: Optional[str] = rest_field(name="exactVersion", visibility=["read"]) + """The specific version of the platform image or marketplace image used to create the node. This + read-only field differs from 'version' only if the value specified for 'version' when the pool + was created was 'latest'.""" + shared_gallery_image_id: Optional[str] = rest_field( + name="sharedGalleryImageId", visibility=["read", "create", "update", "delete", "query"] ) - """Additional private registries from which containers can be pulled. 
If any Images must be - downloaded from a private registry which requires credentials, then those credentials must be - provided here.""" + """The shared gallery image unique identifier. This property is mutually exclusive with other + properties and can be fetched from shared gallery image GET call.""" + community_gallery_image_id: Optional[str] = rest_field( + name="communityGalleryImageId", visibility=["read", "create", "update", "delete", "query"] + ) + """The community gallery image unique identifier. This property is mutually exclusive with other + properties and can be fetched from community gallery image GET call.""" @overload def __init__( self, *, - type: Union[str, "_models.ContainerType"], - container_image_names: Optional[List[str]] = None, - container_registries: Optional[List["_models.ContainerRegistryReference"]] = None, + publisher: Optional[str] = None, + offer: Optional[str] = None, + sku: Optional[str] = None, + version: Optional[str] = None, + virtual_machine_image_id: Optional[str] = None, + shared_gallery_image_id: Optional[str] = None, + community_gallery_image_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CifsMountConfiguration(_Model): + """Information used to connect to a CIFS file system. + + :ivar username: The user to use for authentication against the CIFS file system. Required. + :vartype username: str + :ivar source: The URI of the file system to mount. Required. + :vartype source: str + :ivar relative_mount_path: The relative path on the compute node where the file system will be + mounted. All file systems are mounted relative to the Batch mounts directory, accessible via + the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. + :vartype relative_mount_path: str + :ivar mount_options: Additional command line options to pass to the mount command. These are + 'net use' options in Windows and 'mount' options in Linux. + :vartype mount_options: str + :ivar password: The password to use for authentication against the CIFS file system. Required. + :vartype password: str + """ + + username: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The user to use for authentication against the CIFS file system. Required.""" + source: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URI of the file system to mount. Required.""" + relative_mount_path: str = rest_field( + name="relativeMountPath", visibility=["read", "create", "update", "delete", "query"] + ) + """The relative path on the compute node where the file system will be mounted. All file systems + are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR + environment variable. Required.""" + mount_options: Optional[str] = rest_field( + name="mountOptions", visibility=["read", "create", "update", "delete", "query"] + ) + """Additional command line options to pass to the mount command. These are 'net use' options in + Windows and 'mount' options in Linux.""" + password: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The password to use for authentication against the CIFS file system. 
Required.""" + + @overload + def __init__( + self, + *, + username: str, + source: str, + relative_mount_path: str, + password: str, + mount_options: Optional[str] = None, ) -> None: ... @overload @@ -8575,7 +9109,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ContainerHostBatchBindMountEntry(_model_base.Model): +class ContainerHostBatchBindMountEntry(_Model): """The entry of path and mount mode you want to mount into task container. :ivar source: The path which be mounted to container customer can select. Known values are: @@ -8621,7 +9155,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ContainerRegistryReference(_model_base.Model): +class ContainerRegistryReference(_Model): """A private container registry. :ivar username: The user name to log into the registry server. @@ -8670,7 +9204,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DataDisk(_model_base.Model): +class DataDisk(_Model): """Settings which will be used by the data disks associated to Compute Nodes in the Pool. When using attached data disks, you need to mount and format the disks from within a VM to use them. @@ -8734,43 +9268,27 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DeleteBatchCertificateError(_model_base.Model): - """An error encountered by the Batch service when deleting a Certificate. +class DiskEncryptionConfiguration(_Model): + """The disk encryption configuration applied on compute nodes in the pool. + Disk encryption configuration is not supported on Linux pool created with + Azure Compute Gallery Image. - :ivar code: An identifier for the Certificate deletion error. Codes are invariant and are - intended to be consumed programmatically. - :vartype code: str - :ivar message: A message describing the Certificate deletion error, intended to be suitable for - display in a user interface. - :vartype message: str - :ivar values_property: A list of additional error details related to the Certificate deletion - error. This list includes details such as the active Pools and Compute Nodes referencing this - Certificate. However, if a large number of resources reference the Certificate, the list - contains only about the first hundred. - :vartype values_property: list[~azure.batch.models.NameValuePair] + :ivar targets: The list of disk targets Batch Service will encrypt on the compute node. The + list of disk targets Batch Service will encrypt on the compute node. + :vartype targets: list[str or ~azure.batch.models.DiskEncryptionTarget] """ - code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """An identifier for the Certificate deletion error. Codes are invariant and are intended to be - consumed programmatically.""" - message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A message describing the Certificate deletion error, intended to be suitable for display in a - user interface.""" - values_property: Optional[List["_models.NameValuePair"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"] + targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] ) - """A list of additional error details related to the Certificate deletion error. 
This list - includes details such as the active Pools and Compute Nodes referencing this Certificate. - However, if a large number of resources reference the Certificate, the list contains only about - the first hundred.""" + """The list of disk targets Batch Service will encrypt on the compute node.""" @overload def __init__( self, *, - code: Optional[str] = None, - message: Optional[str] = None, - values_property: Optional[List["_models.NameValuePair"]] = None, + targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = None, ) -> None: ... @overload @@ -8784,96 +9302,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DiffDiskSettings(_model_base.Model): - """Specifies the ephemeral Disk Settings for the operating system disk used by the - compute node (VM). +class EnvironmentSetting(_Model): + """An environment variable to be set on a Task process. - :ivar placement: Specifies the ephemeral disk placement for operating system disk for all VMs - in the pool. This property can be used by user in the request to choose the location e.g., - cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at - `https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - `_ - and Linux VMs at - `https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements - `_. - "cachedisk" - :vartype placement: str or ~azure.batch.models.DiffDiskPlacement - """ - - placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the location e.g., cache disk space for - Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, - please refer to Ephemeral OS disk size requirements for Windows VMs at - `https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - `_ - and Linux VMs at - `https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements - `_. - \"cachedisk\"""" - - @overload - def __init__( - self, - *, - placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class DiskEncryptionConfiguration(_model_base.Model): - """The disk encryption configuration applied on compute nodes in the pool. - Disk encryption configuration is not supported on Linux pool created with - Azure Compute Gallery Image. - - :ivar targets: The list of disk targets Batch Service will encrypt on the compute node. The - list of disk targets Batch Service will encrypt on the compute node.
- :vartype targets: list[str or ~azure.batch.models.DiskEncryptionTarget] - """ - - targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The list of disk targets Batch Service will encrypt on the compute node. The list of disk - targets Batch Service will encrypt on the compute node.""" - - @overload - def __init__( - self, - *, - targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class EnvironmentSetting(_model_base.Model): - """An environment variable to be set on a Task process. - - :ivar name: The name of the environment variable. Required. - :vartype name: str - :ivar value: The value of the environment variable. - :vartype value: str + :ivar name: The name of the environment variable. Required. + :vartype name: str + :ivar value: The value of the environment variable. + :vartype value: str """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -8900,7 +9335,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitCodeMapping(_model_base.Model): +class ExitCodeMapping(_Model): """How the Batch service should respond if a Task exits with a particular exit code. @@ -8937,7 +9372,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitCodeRangeMapping(_model_base.Model): +class ExitCodeRangeMapping(_Model): """A range of exit codes and how the Batch service should respond to exit codes within that range. @@ -8980,7 +9415,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitConditions(_model_base.Model): +class ExitConditions(_Model): """Specifies how the Batch service should respond when the Task completes. :ivar exit_codes: A list of individual Task exit codes and how the Batch service should respond @@ -9053,7 +9488,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitOptions(_model_base.Model): +class ExitOptions(_Model): """Specifies how the Batch service responds to a particular exit condition. :ivar job_action: An action to take on the Job containing the Task, if the Task completes with @@ -9063,7 +9498,7 @@ class ExitOptions(_model_base.Model): Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Known values are: "none", "disable", and "terminate". - :vartype job_action: str or ~azure.batch.models.BatchJobAction + :vartype job_action: str or ~azure.batch.models.BatchJobActionKind :ivar dependency_action: An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. 
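The ExitOptions hunks on either side of this point retype `job_action` from BatchJobAction to BatchJobActionKind. Because `job_action` and `dependency_action` are typed as Union[str, enum], calling code can keep passing the documented string values. A sketch under that assumption; the `exit_code_ranges` keyword on ExitConditions and the `start`/`end`/`exit_options` fields on ExitCodeRangeMapping are inferred from the docstrings above rather than spelled out in these hunks.

# Hypothetical sketch; field names noted above are inferred, not verified.
from azure.batch import models

# "terminate" and "block" are documented known values, so string literals avoid
# guessing the renamed enum's member names while staying type-correct.
on_failure = models.ExitOptions(job_action="terminate", dependency_action="block")

exit_conditions = models.ExitConditions(
    exit_code_ranges=[
        # Treat every exit code from 1 through 255 as fatal for the containing Job.
        models.ExitCodeRangeMapping(start=1, end=255, exit_options=on_failure)
    ],
)

Per the ExitOptions docstring, `job_action` only takes effect when the Job's onTaskFailed property is 'performExitOptionsJobAction'; if onTaskFailed is noaction, specifying it returns an error.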
@@ -9071,7 +9506,7 @@ class ExitOptions(_model_base.Model): :vartype dependency_action: str or ~azure.batch.models.DependencyAction """ - job_action: Optional[Union[str, "_models.BatchJobAction"]] = rest_field( + job_action: Optional[Union[str, "_models.BatchJobActionKind"]] = rest_field( name="jobAction", visibility=["read", "create", "update", "delete", "query"] ) """An action to take on the Job containing the Task, if the Task completes with the given exit @@ -9092,7 +9527,7 @@ class ExitOptions(_model_base.Model): def __init__( self, *, - job_action: Optional[Union[str, "_models.BatchJobAction"]] = None, + job_action: Optional[Union[str, "_models.BatchJobActionKind"]] = None, dependency_action: Optional[Union[str, "_models.DependencyAction"]] = None, ) -> None: ... @@ -9107,7 +9542,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class FileProperties(_model_base.Model): +class FileProperties(_Model): """The properties of a file on a Compute Node. :ivar creation_time: The file creation time. The creation time is not returned for files on @@ -9166,232 +9601,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class GetCertificateResponse(_model_base.Model): - """GetCertificateResponse. - - :ivar thumbprint: The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex - digits (it may include spaces but these are removed). Required. - :vartype thumbprint: str - :ivar thumbprint_algorithm: The algorithm used to derive the thumbprint. This must be sha1. - Required. - :vartype thumbprint_algorithm: str - :ivar url: The URL of the Certificate. - :vartype url: str - :ivar state: The state of the Certificate. Known values are: "active", "deleting", and - "deletefailed". - :vartype state: str or ~azure.batch.models.BatchCertificateState - :ivar state_transition_time: The time at which the Certificate entered its current state. - :vartype state_transition_time: ~datetime.datetime - :ivar previous_state: The previous state of the Certificate. This property is not set if the - Certificate is in its initial active state. Known values are: "active", "deleting", and - "deletefailed". - :vartype previous_state: str or ~azure.batch.models.BatchCertificateState - :ivar previous_state_transition_time: The time at which the Certificate entered its previous - state. This property is not set if the Certificate is in its initial Active state. - :vartype previous_state_transition_time: ~datetime.datetime - :ivar public_data: The public part of the Certificate as a base-64 encoded .cer file. - :vartype public_data: str - :ivar delete_certificate_error: The error that occurred on the last attempt to delete this - Certificate. This property is set only if the Certificate is in the DeleteFailed state. - :vartype delete_certificate_error: ~azure.batch.models.DeleteBatchCertificateError - """ - - thumbprint: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may - include spaces but these are removed). Required.""" - thumbprint_algorithm: str = rest_field( - name="thumbprintAlgorithm", visibility=["read", "create", "update", "delete", "query"] - ) - """The algorithm used to derive the thumbprint. This must be sha1. 
Required.""" - url: Optional[str] = rest_field(visibility=["read"]) - """The URL of the Certificate.""" - state: Optional[Union[str, "_models.BatchCertificateState"]] = rest_field(visibility=["read"]) - """The state of the Certificate. Known values are: \"active\", \"deleting\", and \"deletefailed\".""" - state_transition_time: Optional[datetime.datetime] = rest_field( - name="stateTransitionTime", visibility=["read"], format="rfc3339" - ) - """The time at which the Certificate entered its current state.""" - previous_state: Optional[Union[str, "_models.BatchCertificateState"]] = rest_field( - name="previousState", visibility=["read"] - ) - """The previous state of the Certificate. This property is not set if the Certificate is in its - initial active state. Known values are: \"active\", \"deleting\", and \"deletefailed\".""" - previous_state_transition_time: Optional[datetime.datetime] = rest_field( - name="previousStateTransitionTime", visibility=["read"], format="rfc3339" - ) - """The time at which the Certificate entered its previous state. This property is not set if the - Certificate is in its initial Active state.""" - public_data: Optional[str] = rest_field(name="publicData", visibility=["read"]) - """The public part of the Certificate as a base-64 encoded .cer file.""" - delete_certificate_error: Optional["_models.DeleteBatchCertificateError"] = rest_field( - name="deleteCertificateError", visibility=["read"] - ) - """The error that occurred on the last attempt to delete this Certificate. This property is set - only if the Certificate is in the DeleteFailed state.""" - - @overload - def __init__( - self, - *, - thumbprint: str, - thumbprint_algorithm: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class HttpHeader(_model_base.Model): - """An HTTP header name-value pair. - - :ivar name: The case-insensitive name of the header to be used while uploading output files. - Required. - :vartype name: str - :ivar value: The value of the header to be used while uploading output files. - :vartype value: str - """ - - name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The case-insensitive name of the header to be used while uploading output files. Required.""" - value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The value of the header to be used while uploading output files.""" - - @overload - def __init__( - self, - *, - name: str, - value: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ImageReference(_model_base.Model): - """A reference to an Azure Virtual Machines Marketplace Image or a Azure Compute Gallery Image. - To get the list of all Azure Marketplace Image references verified by Azure Batch, see the - ' List Supported Images ' operation. - - :ivar publisher: The publisher of the Azure Virtual Machines Marketplace Image. For example, - Canonical or MicrosoftWindowsServer. - :vartype publisher: str - :ivar offer: The offer type of the Azure Virtual Machines Marketplace Image. 
For example, - UbuntuServer or WindowsServer. - :vartype offer: str - :ivar sku: The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or - 2019-Datacenter. - :vartype sku: str - :ivar version: The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' - can be specified to select the latest version of an Image. If omitted, the default is 'latest'. - :vartype version: str - :ivar virtual_machine_image_id: The ARM resource identifier of the Azure Compute Gallery Image. - Compute Nodes in the Pool will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is mutually exclusive with - other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same - region and must be in the same subscription as the Azure Batch account. If the image version is - not specified in the imageId, the latest version will be used. For information about the - firewall settings for the Batch Compute Node agent to communicate with the Batch service see - `https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration - `_. - :vartype virtual_machine_image_id: str - :ivar exact_version: The specific version of the platform image or marketplace image used to - create the node. This read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - :vartype exact_version: str - :ivar shared_gallery_image_id: The shared gallery image unique identifier. This property is - mutually exclusive with other properties and can be fetched from shared gallery image GET call. - :vartype shared_gallery_image_id: str - :ivar community_gallery_image_id: The community gallery image unique identifier. This property - is mutually exclusive with other properties and can be fetched from community gallery image GET - call. - :vartype community_gallery_image_id: str - """ - - publisher: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer.""" - offer: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer.""" - sku: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or - 2019-Datacenter.""" - version: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be - specified to select the latest version of an Image. If omitted, the default is 'latest'.""" - virtual_machine_image_id: Optional[str] = rest_field( - name="virtualMachineImageId", visibility=["read", "create", "update", "delete", "query"] - ) - """The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will - be created using this Image Id. 
This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is mutually exclusive with - other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same - region and must be in the same subscription as the Azure Batch account. If the image version is - not specified in the imageId, the latest version will be used. For information about the - firewall settings for the Batch Compute Node agent to communicate with the Batch service see - `https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration - `_.""" - exact_version: Optional[str] = rest_field(name="exactVersion", visibility=["read"]) - """The specific version of the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for 'version' when the pool - was created was 'latest'.""" - shared_gallery_image_id: Optional[str] = rest_field( - name="sharedGalleryImageId", visibility=["read", "create", "update", "delete", "query"] - ) - """The shared gallery image unique identifier. This property is mutually exclusive with other - properties and can be fetched from shared gallery image GET call.""" - community_gallery_image_id: Optional[str] = rest_field( - name="communityGalleryImageId", visibility=["read", "create", "update", "delete", "query"] - ) - """The community gallery image unique identifier. This property is mutually exclusive with other - properties and can be fetched from community gallery image GET call.""" - - @overload - def __init__( - self, - *, - publisher: Optional[str] = None, - offer: Optional[str] = None, - sku: Optional[str] = None, - version: Optional[str] = None, - virtual_machine_image_id: Optional[str] = None, - shared_gallery_image_id: Optional[str] = None, - community_gallery_image_id: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class InboundEndpoint(_model_base.Model): +class InboundEndpoint(_Model): """An inbound endpoint on a Compute Node. :ivar name: The name of the endpoint. Required. @@ -9405,125 +9615,25 @@ class InboundEndpoint(_model_base.Model): :ivar frontend_port: The public port number of the endpoint. Required. :vartype frontend_port: int :ivar backend_port: The backend port number of the endpoint. Required. - :vartype backend_port: int - """ - - name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of the endpoint. Required.""" - protocol: Union[str, "_models.InboundEndpointProtocol"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" - public_ip_address: str = rest_field( - name="publicIPAddress", visibility=["read", "create", "update", "delete", "query"] - ) - """The public IP address of the Compute Node. 
Required.""" - public_fqdn: str = rest_field(name="publicFQDN", visibility=["read", "create", "update", "delete", "query"]) - """The public fully qualified domain name for the Compute Node. Required.""" - frontend_port: int = rest_field(name="frontendPort", visibility=["read", "create", "update", "delete", "query"]) - """The public port number of the endpoint. Required.""" - backend_port: int = rest_field(name="backendPort", visibility=["read", "create", "update", "delete", "query"]) - """The backend port number of the endpoint. Required.""" - - @overload - def __init__( - self, - *, - name: str, - protocol: Union[str, "_models.InboundEndpointProtocol"], - public_ip_address: str, - public_fqdn: str, - frontend_port: int, - backend_port: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class InboundNatPool(_model_base.Model): - """A inbound NAT Pool that can be used to address specific ports on Compute Nodes - in a Batch Pool externally. - - :ivar name: The name of the endpoint. The name must be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, - must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. Required. - :vartype name: str - :ivar protocol: The protocol of the endpoint. Required. Known values are: "tcp" and "udp". - :vartype protocol: str or ~azure.batch.models.InboundEndpointProtocol - :ivar backend_port: The port number on the Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are - reserved. If any reserved values are provided the request fails with HTTP status code 400. - Required. - :vartype backend_port: int - :ivar frontend_port_range_start: The first port number in the range of external ports that will - be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable - values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All - ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 - ports. If any reserved or overlapping values are provided the request fails with HTTP status - code 400. Required. - :vartype frontend_port_range_start: int - :ivar frontend_port_range_end: The last port number in the range of external ports that will be - used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable - values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the - Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or overlapping values are provided the request fails - with HTTP status code 400. Required. - :vartype frontend_port_range_end: int - :ivar network_security_group_rules: A list of network security group rules that will be applied - to the endpoint. The maximum number of rules that can be specified across all the endpoints on - a Batch Pool is 25. If no network security group rules are specified, a default rule will be - created to allow inbound access to the specified backendPort. 
If the maximum number of network - security group rules is exceeded the request fails with HTTP status code 400. - :vartype network_security_group_rules: list[~azure.batch.models.NetworkSecurityGroupRule] + :vartype backend_port: int """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end - with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values - are provided the request fails with HTTP status code 400. Required.""" + """The name of the endpoint. Required.""" protocol: Union[str, "_models.InboundEndpointProtocol"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" - backend_port: int = rest_field(name="backendPort", visibility=["read", "create", "update", "delete", "query"]) - """The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values - are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved - values are provided the request fails with HTTP status code 400. Required.""" - frontend_port_range_start: int = rest_field( - name="frontendPortRangeStart", visibility=["read", "create", "update", "delete", "query"] - ) - """The first port number in the range of external ports that will be used to provide inbound - access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and - 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP status code 400. Required.""" - frontend_port_range_end: int = rest_field( - name="frontendPortRangeEnd", visibility=["read", "create", "update", "delete", "query"] - ) - """The last port number in the range of external ports that will be used to provide inbound access - to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a - Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the request fails with HTTP status code 400. - Required.""" - network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = rest_field( - name="networkSecurityGroupRules", visibility=["read", "create", "update", "delete", "query"] + public_ip_address: str = rest_field( + name="publicIPAddress", visibility=["read", "create", "update", "delete", "query"] ) - """A list of network security group rules that will be applied to the endpoint. The maximum number - of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network - security group rules are specified, a default rule will be created to allow inbound access to - the specified backendPort. If the maximum number of network security group rules is exceeded - the request fails with HTTP status code 400.""" + """The public IP address of the Compute Node. 
Required.""" + public_fqdn: str = rest_field(name="publicFQDN", visibility=["read", "create", "update", "delete", "query"]) + """The public fully qualified domain name for the Compute Node. Required.""" + frontend_port: int = rest_field(name="frontendPort", visibility=["read", "create", "update", "delete", "query"]) + """The public port number of the endpoint. Required.""" + backend_port: int = rest_field(name="backendPort", visibility=["read", "create", "update", "delete", "query"]) + """The backend port number of the endpoint. Required.""" @overload def __init__( @@ -9531,10 +9641,10 @@ def __init__( *, name: str, protocol: Union[str, "_models.InboundEndpointProtocol"], + public_ip_address: str, + public_fqdn: str, + frontend_port: int, backend_port: int, - frontend_port_range_start: int, - frontend_port_range_end: int, - network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = None, ) -> None: ... @overload @@ -9548,7 +9658,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class InstanceViewStatus(_model_base.Model): +class InstanceViewStatus(_Model): """The instance view status. :ivar code: The status code. @@ -9602,7 +9712,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class LinuxUserConfiguration(_model_base.Model): +class LinuxUserConfiguration(_Model): """Properties used to create a user Account on a Linux Compute Node. :ivar uid: The user ID of the user Account. The uid and gid properties must be specified @@ -9657,14 +9767,14 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ManagedDisk(_model_base.Model): +class ManagedDisk(_Model): """The managed disk parameters. :ivar storage_account_type: The storage account type for managed disk. Known values are: "standard_lrs", "premium_lrs", and "standardssd_lrs". :vartype storage_account_type: str or ~azure.batch.models.StorageAccountType :ivar security_profile: Specifies the security profile settings for the managed disk. - :vartype security_profile: ~azure.batch.models.VMDiskSecurityProfile + :vartype security_profile: ~azure.batch.models.VmDiskSecurityProfile """ storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = rest_field( @@ -9672,7 +9782,7 @@ class ManagedDisk(_model_base.Model): ) """The storage account type for managed disk. Known values are: \"standard_lrs\", \"premium_lrs\", and \"standardssd_lrs\".""" - security_profile: Optional["_models.VMDiskSecurityProfile"] = rest_field( + security_profile: Optional["_models.VmDiskSecurityProfile"] = rest_field( name="securityProfile", visibility=["read", "create", "update", "delete", "query"] ) """Specifies the security profile settings for the managed disk.""" @@ -9682,41 +9792,7 @@ def __init__( self, *, storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = None, - security_profile: Optional["_models.VMDiskSecurityProfile"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MetadataItem(_model_base.Model): - """The Batch service does not assign any meaning to this metadata; it is solely - for the use of user code. - - :ivar name: The name of the metadata item. Required. 
- :vartype name: str - :ivar value: The value of the metadata item. Required. - :vartype value: str - """ - - name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of the metadata item. Required.""" - value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The value of the metadata item. Required.""" - - @overload - def __init__( - self, - *, - name: str, - value: str, + security_profile: Optional["_models.VmDiskSecurityProfile"] = None, ) -> None: ... @overload @@ -9730,7 +9806,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MountConfiguration(_model_base.Model): +class MountConfiguration(_Model): """The file system to mount on each node. :ivar azure_blob_file_system_configuration: The Azure Storage Container to mount using blob @@ -9790,7 +9866,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MultiInstanceSettings(_model_base.Model): +class MultiInstanceSettings(_Model): """Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, if any of the subtasks fail (for example due to exiting with a non-zero exit code) the entire multi-instance Task fails. The multi-instance Task is then @@ -9860,7 +9936,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NameValuePair(_model_base.Model): +class NameValuePair(_Model): """Represents a name-value pair. :ivar name: The name in the name-value pair. @@ -9893,7 +9969,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NetworkConfiguration(_model_base.Model): +class NetworkConfiguration(_Model): """The network configuration for a Pool. :ivar subnet_id: The ARM resource identifier of the virtual network subnet which the Compute @@ -9915,15 +9991,15 @@ class NetworkConfiguration(_model_base.Model): `https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration `_. :vartype subnet_id: str - :ivar dynamic_v_net_assignment_scope: The scope of dynamic vnet assignment. Known values are: + :ivar dynamic_vnet_assignment_scope: The scope of dynamic vnet assignment. Known values are: "none" and "job". - :vartype dynamic_v_net_assignment_scope: str or ~azure.batch.models.DynamicVNetAssignmentScope + :vartype dynamic_vnet_assignment_scope: str or ~azure.batch.models.DynamicVNetAssignmentScope :ivar endpoint_configuration: The configuration for endpoints on Compute Nodes in the Batch Pool. :vartype endpoint_configuration: ~azure.batch.models.BatchPoolEndpointConfiguration :ivar public_ip_address_configuration: The Public IPAddress configuration for Compute Nodes in the Batch Pool. - :vartype public_ip_address_configuration: ~azure.batch.models.PublicIpAddressConfiguration + :vartype public_ip_address_configuration: ~azure.batch.models.BatchPublicIpAddressConfiguration :ivar enable_accelerated_networking: Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: @@ -9951,7 +10027,7 @@ class NetworkConfiguration(_model_base.Model): 29877. Also enable outbound connections to Azure Storage on port 443. 
For more details see: `https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration `_.""" - dynamic_v_net_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = rest_field( + dynamic_vnet_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = rest_field( name="dynamicVNetAssignmentScope", visibility=["read", "create", "update", "delete", "query"] ) """The scope of dynamic vnet assignment. Known values are: \"none\" and \"job\".""" @@ -9959,7 +10035,7 @@ class NetworkConfiguration(_model_base.Model): name="endpointConfiguration", visibility=["read", "create", "update", "delete", "query"] ) """The configuration for endpoints on Compute Nodes in the Batch Pool.""" - public_ip_address_configuration: Optional["_models.PublicIpAddressConfiguration"] = rest_field( + public_ip_address_configuration: Optional["_models.BatchPublicIpAddressConfiguration"] = rest_field( name="publicIPAddressConfiguration", visibility=["read", "create", "update", "delete", "query"] ) """The Public IPAddress configuration for Compute Nodes in the Batch Pool.""" @@ -9977,9 +10053,9 @@ def __init__( self, *, subnet_id: Optional[str] = None, - dynamic_v_net_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = None, + dynamic_vnet_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = None, endpoint_configuration: Optional["_models.BatchPoolEndpointConfiguration"] = None, - public_ip_address_configuration: Optional["_models.PublicIpAddressConfiguration"] = None, + public_ip_address_configuration: Optional["_models.BatchPublicIpAddressConfiguration"] = None, enable_accelerated_networking: Optional[bool] = None, ) -> None: ... @@ -9994,7 +10070,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NetworkSecurityGroupRule(_model_base.Model): +class NetworkSecurityGroupRule(_Model): """A network security group rule to apply to an inbound endpoint. :ivar priority: The priority for this rule. Priorities within a Pool must be unique and are @@ -10068,7 +10144,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NfsMountConfiguration(_model_base.Model): +class NfsMountConfiguration(_Model): """Information used to connect to an NFS file system. :ivar source: The URI of the file system to mount. Required. @@ -10116,72 +10192,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class OSDisk(_model_base.Model): - """Settings for the operating system disk of the compute node (VM). - - :ivar ephemeral_os_disk_settings: Specifies the ephemeral Disk Settings for the operating - system disk used by the compute node (VM). - :vartype ephemeral_os_disk_settings: ~azure.batch.models.DiffDiskSettings - :ivar caching: Specifies the caching requirements. Possible values are: None, ReadOnly, - ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - :vartype caching: str or ~azure.batch.models.CachingType - :ivar disk_size_gb: The initial disk size in GB when creating new OS disk. - :vartype disk_size_gb: int - :ivar managed_disk: The managed disk parameters. - :vartype managed_disk: ~azure.batch.models.ManagedDisk - :ivar write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or - disabled on the disk. 
- :vartype write_accelerator_enabled: bool - """ - - ephemeral_os_disk_settings: Optional["_models.DiffDiskSettings"] = rest_field( - name="ephemeralOSDiskSettings", visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies the ephemeral Disk Settings for the operating system disk used by the compute node - (VM).""" - caching: Optional[Union[str, "_models.CachingType"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. Known values are: - \"none\", \"readonly\", and \"readwrite\".""" - disk_size_gb: Optional[int] = rest_field( - name="diskSizeGB", visibility=["read", "create", "update", "delete", "query"] - ) - """The initial disk size in GB when creating new OS disk.""" - managed_disk: Optional["_models.ManagedDisk"] = rest_field( - name="managedDisk", visibility=["read", "create", "update", "delete", "query"] - ) - """The managed disk parameters.""" - write_accelerator_enabled: Optional[bool] = rest_field( - name="writeAcceleratorEnabled", visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies whether writeAccelerator should be enabled or disabled on the disk.""" - - @overload - def __init__( - self, - *, - ephemeral_os_disk_settings: Optional["_models.DiffDiskSettings"] = None, - caching: Optional[Union[str, "_models.CachingType"]] = None, - disk_size_gb: Optional[int] = None, - managed_disk: Optional["_models.ManagedDisk"] = None, - write_accelerator_enabled: Optional[bool] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class OutputFile(_model_base.Model): +class OutputFile(_Model): """On every file uploads, Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. @@ -10252,7 +10263,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class OutputFileBlobContainerDestination(_model_base.Model): +class OutputFileBlobContainerDestination(_Model): """Specifies a file upload destination within an Azure blob storage container. :ivar path: The destination blob or virtual directory within the Azure Storage container. If @@ -10275,7 +10286,7 @@ class OutputFileBlobContainerDestination(_model_base.Model): on allowed headers when uploading blobs: `https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types `_. - :vartype upload_headers: list[~azure.batch.models.HttpHeader] + :vartype upload_headers: list[~azure.batch.models.OutputFileUploadHeader] """ path: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -10294,7 +10305,7 @@ class OutputFileBlobContainerDestination(_model_base.Model): ) """The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. 
The identity must have write access to the Azure Blob Storage container.""" - upload_headers: Optional[List["_models.HttpHeader"]] = rest_field( + upload_headers: Optional[List["_models.OutputFileUploadHeader"]] = rest_field( name="uploadHeaders", visibility=["read", "create", "update", "delete", "query"] ) """A list of name-value pairs for headers to be used in uploading output files. These headers will @@ -10310,7 +10321,7 @@ def __init__( container_url: str, path: Optional[str] = None, identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, - upload_headers: Optional[List["_models.HttpHeader"]] = None, + upload_headers: Optional[List["_models.OutputFileUploadHeader"]] = None, ) -> None: ... @overload @@ -10324,7 +10335,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class OutputFileDestination(_model_base.Model): +class OutputFileDestination(_Model): """The destination to which a file should be uploaded. :ivar container: A location in Azure blob storage to which files are uploaded. @@ -10354,7 +10365,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class OutputFileUploadConfig(_model_base.Model): +class OutputFileUploadConfig(_Model): """Options for an output file upload operation, including under what conditions to perform the upload. @@ -10389,43 +10400,27 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class PublicIpAddressConfiguration(_model_base.Model): - """The public IP Address configuration of the networking configuration of a Pool. +class OutputFileUploadHeader(_Model): + """An HTTP header name-value pair. - :ivar ip_address_provisioning_type: The provisioning type for Public IP Addresses for the Pool. - The default value is BatchManaged. Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - :vartype ip_address_provisioning_type: str or ~azure.batch.models.IpAddressProvisioningType - :ivar ip_address_ids: The list of public IPs which the Batch service will use when provisioning - Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - :vartype ip_address_ids: list[str] + :ivar name: The case-insensitive name of the header to be used while uploading output files. + Required. + :vartype name: str + :ivar value: The value of the header to be used while uploading output files. + :vartype value: str """ - ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = rest_field( - name="provision", visibility=["read", "create", "update", "delete", "query"] - ) - """The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: \"batchmanaged\", \"usermanaged\", and \"nopublicipaddresses\".""" - ip_address_ids: Optional[List[str]] = rest_field( - name="ipAddressIds", visibility=["read", "create", "update", "delete", "query"] - ) - """The list of public IPs which the Batch service will use when provisioning Compute Nodes. 
The - number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 - Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 - dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of - the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The case-insensitive name of the header to be used while uploading output files. Required.""" + value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The value of the header to be used while uploading output files.""" @overload def __init__( self, *, - ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = None, - ip_address_ids: Optional[List[str]] = None, + name: str, + value: Optional[str] = None, ) -> None: ... @overload @@ -10439,7 +10434,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class RecentBatchJob(_model_base.Model): +class RecentBatchJob(_Model): """Information about the most recent Job to run under the Job Schedule. :ivar id: The ID of the Job. @@ -10472,7 +10467,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ResizeError(_model_base.Model): +class ResizeError(_Model): """An error that occurred when resizing a Pool. :ivar code: An identifier for the Pool resize error. Codes are invariant and are intended to be @@ -10516,7 +10511,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ResourceFile(_model_base.Model): +class ResourceFile(_Model): """A single file or multiple files to be downloaded to a Compute Node. :ivar auto_storage_container_name: The storage container name in the auto storage Account. The @@ -10636,7 +10631,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class RollingUpgradePolicy(_model_base.Model): +class RollingUpgradePolicy(_Model): """The configuration parameters used while performing a rolling upgrade. :ivar enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when constructing upgrade @@ -10747,7 +10742,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class SecurityProfile(_model_base.Model): +class SecurityProfile(_Model): """Specifies the security profile settings for the virtual machine or virtual machine scale set. :ivar encryption_at_host: This property can be used by user in the request to enable or disable @@ -10765,7 +10760,7 @@ class SecurityProfile(_model_base.Model): :ivar uefi_settings: Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Required. - :vartype uefi_settings: ~azure.batch.models.UefiSettings + :vartype uefi_settings: ~azure.batch.models.BatchUefiSettings """ encryption_at_host: bool = rest_field( @@ -10783,7 +10778,7 @@ class SecurityProfile(_model_base.Model): ) """Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. Required. 
Known values are: \"trustedLaunch\" and \"confidentialVM\".""" - uefi_settings: "_models.UefiSettings" = rest_field( + uefi_settings: "_models.BatchUefiSettings" = rest_field( name="uefiSettings", visibility=["read", "create", "update", "delete", "query"] ) """Specifies the security settings like secure boot and vTPM used while creating the virtual @@ -10796,7 +10791,7 @@ def __init__( *, encryption_at_host: bool, security_type: Union[str, "_models.SecurityTypes"], - uefi_settings: "_models.UefiSettings", + uefi_settings: "_models.BatchUefiSettings", ) -> None: ... @overload @@ -10810,7 +10805,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ServiceArtifactReference(_model_base.Model): +class ServiceArtifactReference(_Model): """Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. @@ -10845,46 +10840,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class UefiSettings(_model_base.Model): - """Specifies the security settings like secure boot and vTPM used while creating the virtual - machine. - - :ivar secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual - machine. - :vartype secure_boot_enabled: bool - :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. - :vartype v_tpm_enabled: bool - """ - - secure_boot_enabled: Optional[bool] = rest_field( - name="secureBootEnabled", visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies whether secure boot should be enabled on the virtual machine.""" - v_tpm_enabled: Optional[bool] = rest_field( - name="vTpmEnabled", visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies whether vTPM should be enabled on the virtual machine.""" - - @overload - def __init__( - self, - *, - secure_boot_enabled: Optional[bool] = None, - v_tpm_enabled: Optional[bool] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class UpgradePolicy(_model_base.Model): +class UpgradePolicy(_Model): """Describes an upgrade policy - automatic, manual, or rolling. :ivar mode: Specifies the mode of an upgrade to virtual machines in the scale set.

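
A minimal usage sketch of the renamed security models. The SecurityProfile
keywords match the overload in the hunk above; the BatchUefiSettings keywords
are assumed to mirror the removed UefiSettings model:

    from azure.batch.models import BatchUefiSettings, SecurityProfile

    profile = SecurityProfile(
        encryption_at_host=True,
        # Known values per the docstring above: "trustedLaunch", "confidentialVM".
        security_type="trustedLaunch",
        uefi_settings=BatchUefiSettings(
            secure_boot_enabled=True,  # assumed carry-over from UefiSettings
            v_tpm_enabled=True,        # assumed carry-over from UefiSettings
        ),
    )

The same pattern applies to NetworkConfiguration, whose keyword is now
dynamic_vnet_assignment_scope and whose public IP model is now
BatchPublicIpAddressConfiguration.
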
@@ -10939,7 +10895,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class UploadBatchServiceLogsContent(_model_base.Model): +class UploadBatchServiceLogsOptions(_Model): """The Azure Batch service log files upload parameters for a Compute Node. :ivar container_url: The URL of the container within Azure Blob Storage to which to upload the @@ -11013,7 +10969,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class UploadBatchServiceLogsResult(_model_base.Model): +class UploadBatchServiceLogsResult(_Model): """The result of uploading Batch service log files from a specific Compute Node. :ivar virtual_directory_name: The virtual directory within Azure Blob Storage container to @@ -11055,7 +11011,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class UserAccount(_model_base.Model): +class UserAccount(_Model): """Properties used to create a user used to execute Tasks on an Azure Batch Compute Node. @@ -11121,43 +11077,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class UserAssignedIdentity(_model_base.Model): - """The user assigned Identity. - - :ivar resource_id: The ARM resource id of the user assigned identity. Required. - :vartype resource_id: str - :ivar client_id: The client id of the user assigned identity. - :vartype client_id: str - :ivar principal_id: The principal id of the user assigned identity. - :vartype principal_id: str - """ - - resource_id: str = rest_field(name="resourceId", visibility=["read", "create", "update", "delete", "query"]) - """The ARM resource id of the user assigned identity. Required.""" - client_id: Optional[str] = rest_field(name="clientId", visibility=["read"]) - """The client id of the user assigned identity.""" - principal_id: Optional[str] = rest_field(name="principalId", visibility=["read"]) - """The principal id of the user assigned identity.""" - - @overload - def __init__( - self, - *, - resource_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class UserIdentity(_model_base.Model): +class UserIdentity(_Model): """The definition of the user identity under which the Task is run. Specify either the userName or autoUser property, but not both. @@ -11197,13 +11117,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VirtualMachineConfiguration(_model_base.Model): +class VirtualMachineConfiguration(_Model): """The configuration for Compute Nodes in a Pool based on the Azure Virtual Machines infrastructure. :ivar image_reference: A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. Required. - :vartype image_reference: ~azure.batch.models.ImageReference + :vartype image_reference: ~azure.batch.models.BatchVmImageReference :ivar node_agent_sku_id: The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. 
The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch @@ -11242,7 +11162,7 @@ class VirtualMachineConfiguration(_model_base.Model): performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. - :vartype container_configuration: ~azure.batch.models.ContainerConfiguration + :vartype container_configuration: ~azure.batch.models.BatchContainerConfiguration :ivar disk_encryption_configuration: The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. :vartype disk_encryption_configuration: ~azure.batch.models.DiskEncryptionConfiguration @@ -11253,7 +11173,7 @@ class VirtualMachineConfiguration(_model_base.Model): mentioned in this configuration will be installed on each node. :vartype extensions: list[~azure.batch.models.VMExtension] :ivar os_disk: Settings for the operating system disk of the Virtual Machine. - :vartype os_disk: ~azure.batch.models.OSDisk + :vartype os_disk: ~azure.batch.models.BatchOsDisk :ivar security_profile: Specifies the security profile settings for the virtual machine or virtual machine scale set. :vartype security_profile: ~azure.batch.models.SecurityProfile @@ -11264,7 +11184,7 @@ class VirtualMachineConfiguration(_model_base.Model): :vartype service_artifact_reference: ~azure.batch.models.ServiceArtifactReference """ - image_reference: "_models.ImageReference" = rest_field( + image_reference: "_models.BatchVmImageReference" = rest_field( name="imageReference", visibility=["read", "create", "update", "delete", "query"] ) """A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image @@ -11309,7 +11229,7 @@ class VirtualMachineConfiguration(_model_base.Model): Windows_Server - The on-premises license is for Windows Server. Windows_Client - The on-premises license is for Windows Client.""" - container_configuration: Optional["_models.ContainerConfiguration"] = rest_field( + container_configuration: Optional["_models.BatchContainerConfiguration"] = rest_field( name="containerConfiguration", visibility=["read", "create", "update", "delete", "query"] ) """The container configuration for the Pool. If specified, setup is performed on each Compute Node @@ -11330,7 +11250,7 @@ class VirtualMachineConfiguration(_model_base.Model): ) """The virtual machine extension for the pool. 
If specified, the extensions mentioned in this configuration will be installed on each node.""" - os_disk: Optional["_models.OSDisk"] = rest_field( + os_disk: Optional["_models.BatchOsDisk"] = rest_field( name="osDisk", visibility=["read", "create", "update", "delete", "query"] ) """Settings for the operating system disk of the Virtual Machine.""" @@ -11350,16 +11270,16 @@ class VirtualMachineConfiguration(_model_base.Model): def __init__( self, *, - image_reference: "_models.ImageReference", + image_reference: "_models.BatchVmImageReference", node_agent_sku_id: str, windows_configuration: Optional["_models.WindowsConfiguration"] = None, data_disks: Optional[List["_models.DataDisk"]] = None, license_type: Optional[str] = None, - container_configuration: Optional["_models.ContainerConfiguration"] = None, + container_configuration: Optional["_models.BatchContainerConfiguration"] = None, disk_encryption_configuration: Optional["_models.DiskEncryptionConfiguration"] = None, node_placement_configuration: Optional["_models.BatchNodePlacementConfiguration"] = None, extensions: Optional[List["_models.VMExtension"]] = None, - os_disk: Optional["_models.OSDisk"] = None, + os_disk: Optional["_models.BatchOsDisk"] = None, security_profile: Optional["_models.SecurityProfile"] = None, service_artifact_reference: Optional["_models.ServiceArtifactReference"] = None, ) -> None: ... @@ -11375,18 +11295,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VirtualMachineInfo(_model_base.Model): +class VirtualMachineInfo(_Model): """Info about the current state of the virtual machine. :ivar image_reference: The reference to the Azure Virtual Machine's Marketplace Image. - :vartype image_reference: ~azure.batch.models.ImageReference + :vartype image_reference: ~azure.batch.models.BatchVmImageReference :ivar scale_set_vm_resource_id: The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. :vartype scale_set_vm_resource_id: str """ - image_reference: Optional["_models.ImageReference"] = rest_field( + image_reference: Optional["_models.BatchVmImageReference"] = rest_field( name="imageReference", visibility=["read", "create", "update", "delete", "query"] ) """The reference to the Azure Virtual Machine's Marketplace Image.""" @@ -11400,7 +11320,7 @@ class VirtualMachineInfo(_model_base.Model): def __init__( self, *, - image_reference: Optional["_models.ImageReference"] = None, + image_reference: Optional["_models.BatchVmImageReference"] = None, scale_set_vm_resource_id: Optional[str] = None, ) -> None: ... @@ -11415,7 +11335,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VMDiskSecurityProfile(_model_base.Model): +class VmDiskSecurityProfile(_Model): """Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. @@ -11453,7 +11373,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VMExtension(_model_base.Model): +class VMExtension(_Model): """The configuration for virtual machine extensions. :ivar name: The name of the virtual machine extension. Required. 
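
A sketch of the renamed VM models. The VirtualMachineConfiguration keywords
are taken from the overload above; the nested BatchVmImageReference and
BatchOsDisk keywords are assumed to mirror the removed ImageReference and
OSDisk models, and the marketplace image and agent SKU strings are
illustrative placeholders:

    from azure.batch.models import (
        BatchOsDisk,
        BatchVmImageReference,
        VirtualMachineConfiguration,
    )

    vm_config = VirtualMachineConfiguration(
        image_reference=BatchVmImageReference(
            publisher="canonical",                 # placeholder marketplace image;
            offer="0001-com-ubuntu-server-jammy",  # kwargs assumed from the removed
            sku="22_04-lts",                       # ImageReference model
            version="latest",
        ),
        node_agent_sku_id="batch.node.ubuntu 22.04",  # illustrative agent SKU id
        os_disk=BatchOsDisk(
            caching="readwrite",  # assumed carry-overs from the removed OSDisk
            disk_size_gb=128,
        ),
    )
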
@@ -11541,7 +11461,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VMExtensionInstanceView(_model_base.Model): +class VMExtensionInstanceView(_Model): """The vm extension instance view. :ivar name: The name of the vm extension instance view. @@ -11583,7 +11503,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class WindowsConfiguration(_model_base.Model): +class WindowsConfiguration(_Model): """Windows operating system settings to apply to the virtual machine. :ivar enable_automatic_updates: Whether automatic updates are enabled on the virtual machine. @@ -11615,7 +11535,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class WindowsUserConfiguration(_model_base.Model): +class WindowsUserConfiguration(_Model): """Properties used to create a user Account on a Windows Compute Node. :ivar login_mode: The login mode for the user. The default is 'batch'. Known values are: diff --git a/sdk/batch/azure-batch/azure/batch/models/_patch.py b/sdk/batch/azure-batch/azure/batch/models/_patch.py index 6435ce16c022..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/models/_patch.py @@ -1,123 +1,16 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -from typing import List, Any, Optional +from typing import List -from azure.core.exceptions import HttpResponseError +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level -from ._models import BatchPoolReplaceContent as BatchPoolReplaceContentGenerated -from .._model_base import rest_field - -__all__: List[str] = [ - "CreateTasksError", - "BatchFileProperties", -] # Add all objects you want publicly available to users at this package level - -class CreateTasksError(HttpResponseError): - """Aggregate Exception containing details for any failures from a task add operation. - - :param str message: Error message describing exit reason - :param [~TaskAddParameter] pending_tasks: List of tasks remaining to be submitted. - :param [~TaskAddResult] failure_tasks: List of tasks which failed to add - :param [~Exception] errors: List of unknown errors forcing early termination - """ - - def __init__(self, pending_tasks=[], failure_tasks=[], errors=[]): - self.pending_tasks = pending_tasks - self.failure_tasks = failure_tasks - self.errors = errors - if failure_tasks and errors: - self.message = ( - "Multiple errors encountered. Check the `failure_tasks` and " - "`errors` properties for additional details." - ) - elif errors: - if len(errors) > 1: - self.message = ( - "Multiple errors occurred when submitting add_collection " - "requests. Check the `errors` property for the inner " - "exceptions." 
- ) - else: - self.message = str(errors[0]) - elif failure_tasks: - if len(failure_tasks) > 1: - self.message = ( - "Multiple client side errors occurred when adding the " - "tasks. Check the `failure_tasks` property for details on" - " these tasks." - ) - else: - result = failure_tasks[0] - self.message = "Task with id `%s` failed due to client error - %s::%s" % ( - result.task_id, - result.error.code, - result.error.message, - ) - super(CreateTasksError, self).__init__(self.message) - -class BatchFileProperties: - - """Information about a file or directory on a Compute Node with additional properties. - - :ivar url: The URL of the file. - :vartype url: str - :ivar is_directory: Whether the object represents a directory. - :vartype is_directory: bool - :ivar creation_time: The file creation time. The creation time is not returned for files on - Linux Compute Nodes. - :vartype creation_time: ~datetime.datetime - :ivar last_modified: The time at which the file was last modified. Required. - :vartype last_modified: ~datetime.datetime - :ivar content_length: The length of the file. Required. - :vartype content_length: int - :ivar content_type: The content type of the file. - :vartype content_type: str - :ivar file_mode: The file mode attribute in octal format. The file mode is returned only for - files on Linux Compute Nodes. - :vartype file_mode: str - """ - - url: Optional[str] - """The URL of the file.""" - is_directory: Optional[bool] - """Whether the object represents a directory.""" - creation_time: Optional[datetime.datetime] - """The file creation time. The creation time is not returned for files on Linux Compute Nodes.""" - last_modified: datetime.datetime - """The time at which the file was last modified. Required.""" - content_length: int - """The length of the file. Required.""" - content_type: Optional[str] - """The content type of the file.""" - file_mode: Optional[str] - """The file mode attribute in octal format. The file mode is returned only for files on Linux - Compute Nodes.""" - - def __init__( - self, - *, - url: Optional[str] = None, - is_directory: Optional[bool] = None, - last_modified: datetime.datetime, - content_length: int, - creation_time: Optional[datetime.datetime] = None, - content_type: Optional[str] = None, - file_mode: Optional[str] = None, - ) -> None: - self.url = url - self.is_directory = is_directory - self.creation_time = creation_time - self.last_modified = last_modified - self.content_length = content_length - self.content_type = content_type - self.file_mode = file_mode def patch_sdk(): """Do not remove from this file. diff --git a/sdk/batch/azure-batch/setup.py b/sdk/batch/azure-batch/setup.py index c8218eb0bece..82d04d01a4e4 100644 --- a/sdk/batch/azure-batch/setup.py +++ b/sdk/batch/azure-batch/setup.py @@ -5,7 +5,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -# coding: utf-8 + import os import re @@ -29,7 +29,7 @@ setup( name=PACKAGE_NAME, version=version, - description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + description="Microsoft Corporation {} Client Library for Python".format(PACKAGE_PPRINT_NAME), long_description=open("README.md", "r").read(), long_description_content_type="text/markdown", license="MIT License", @@ -42,7 +42,6 @@ "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -66,5 +65,5 @@ "azure-core>=1.30.0", "typing-extensions>=4.6.0", ], - python_requires=">=3.8", + python_requires=">=3.9", ) diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py index 51e33cb2465b..cf98cfbd6c49 100644 --- a/sdk/batch/azure-batch/tests/test_batch.py +++ b/sdk/batch/azure-batch/tests/test_batch.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=too-many-lines,line-too-long,useless-suppression # coding: utf-8 # ------------------------------------------------------------------------- @@ -85,7 +85,7 @@ async def assertCreateTasksError(self, code, func, *args, **kwargs): pytest.fail("Inner BatchErrorException expected but not exist") except Exception as err: pytest.fail("Expected CreateTasksError, instead got: {!r}".format(err)) - + @CachedResourceGroupPreparer(location=AZURE_LOCATION) @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT) @pytest.mark.parametrize("BatchClient", [SyncBatchClient, AsyncBatchClient], ids=["sync", "async"]) diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml index 96f30af8578f..dfa636e3fa57 100644 --- a/sdk/batch/azure-batch/tsp-location.yaml +++ b/sdk/batch/azure-batch/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/batch/Azure.Batch -commit: e7ed63002df7e9cc1d3e4cd139d76c4d7040acd3 +commit: da5f436da0537251f7336b56f1e2df48c634d147 repo: Azure/azure-rest-api-specs additionalDirectories:
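
For completeness, a sketch of the upload-header rename (HttpHeader to
OutputFileUploadHeader). Both constructor signatures appear verbatim in this
patch; only the container URL is a placeholder:

    from azure.batch.models import (
        OutputFileBlobContainerDestination,
        OutputFileUploadHeader,
    )

    destination = OutputFileBlobContainerDestination(
        container_url="https://<account>.blob.core.windows.net/<container>?<sas>",
        path="task-output",
        upload_headers=[
            # Renamed from HttpHeader; name is required, value is optional.
            OutputFileUploadHeader(name="x-ms-blob-content-type", value="text/plain"),
        ],
    )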