From 53cecd2328e58a7a427c8d34956c62e91825491a Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Mon, 14 Apr 2025 17:34:31 +0000 Subject: [PATCH] CodeGen from PR 32768 in Azure/azure-rest-api-specs Merge 5cd43738852c5940749809a7be9fc7f56f7c77d6 into 21398145c857d35b4c0ab773ff4e79d8dd464eb1 --- sdk/batch/azure-batch/MANIFEST.in | 2 +- sdk/batch/azure-batch/_meta.json | 6 + sdk/batch/azure-batch/apiview-properties.json | 286 +++++++++ .../azure-batch/azure/batch/_model_base.py | 5 +- .../azure/batch/_operations/_operations.py | 62 +- .../azure/batch/_operations/_patch.py | 561 +----------------- sdk/batch/azure-batch/azure/batch/_patch.py | 161 +---- .../azure-batch/azure/batch/_serialization.py | 26 +- sdk/batch/azure-batch/azure/batch/_version.py | 2 +- .../batch/aio/_operations/_operations.py | 52 +- .../azure/batch/aio/_operations/_patch.py | 539 +---------------- .../azure-batch/azure/batch/aio/_patch.py | 54 +- .../azure/batch/models/__init__.py | 2 - .../azure-batch/azure/batch/models/_models.py | 82 --- .../azure-batch/azure/batch/models/_patch.py | 121 +--- sdk/batch/azure-batch/setup.py | 4 +- sdk/batch/azure-batch/tests/test_batch.py | 6 +- sdk/batch/azure-batch/tsp-location.yaml | 2 +- 18 files changed, 397 insertions(+), 1576 deletions(-) create mode 100644 sdk/batch/azure-batch/_meta.json create mode 100644 sdk/batch/azure-batch/apiview-properties.json diff --git a/sdk/batch/azure-batch/MANIFEST.in b/sdk/batch/azure-batch/MANIFEST.in index cb1e2b1128cb..06208ba9fde2 100644 --- a/sdk/batch/azure-batch/MANIFEST.in +++ b/sdk/batch/azure-batch/MANIFEST.in @@ -3,4 +3,4 @@ include LICENSE include azure/batch/py.typed recursive-include tests *.py recursive-include samples *.py *.md -include azure/__init__.py \ No newline at end of file +include azure/__init__.py diff --git a/sdk/batch/azure-batch/_meta.json b/sdk/batch/azure-batch/_meta.json new file mode 100644 index 000000000000..9353d3ccccb5 --- /dev/null +++ b/sdk/batch/azure-batch/_meta.json @@ -0,0 +1,6 @@ +{ + "commit": "167d2be6a8c000e27109c75f9133218e92c64724", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/batch/Azure.Batch", + "@azure-tools/typespec-python": "0.42.2" +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/apiview-properties.json b/sdk/batch/azure-batch/apiview-properties.json new file mode 100644 index 000000000000..54819e20dea8 --- /dev/null +++ b/sdk/batch/azure-batch/apiview-properties.json @@ -0,0 +1,286 @@ +{ + "CrossLanguagePackageId": "Azure.Batch", + "CrossLanguageDefinitionId": { + "azure.batch.models.AffinityInfo": "Azure.Batch.AffinityInfo", + "azure.batch.models.AuthenticationTokenSettings": "Azure.Batch.AuthenticationTokenSettings", + "azure.batch.models.AutomaticOsUpgradePolicy": "Azure.Batch.AutomaticOsUpgradePolicy", + "azure.batch.models.AutoScaleRun": "Azure.Batch.AutoScaleRun", + "azure.batch.models.AutoScaleRunError": "Azure.Batch.AutoScaleRunError", + "azure.batch.models.AutoUserSpecification": "Azure.Batch.AutoUserSpecification", + "azure.batch.models.AzureBlobFileSystemConfiguration": "Azure.Batch.AzureBlobFileSystemConfiguration", + "azure.batch.models.AzureFileShareConfiguration": "Azure.Batch.AzureFileShareConfiguration", + "azure.batch.models.BatchApplication": "Azure.Batch.BatchApplication", + "azure.batch.models.BatchApplicationPackageReference": "Azure.Batch.BatchApplicationPackageReference", + "azure.batch.models.BatchAutoPoolSpecification": "Azure.Batch.BatchAutoPoolSpecification", + 
"azure.batch.models.BatchCertificate": "Azure.Batch.BatchCertificate", + "azure.batch.models.BatchCertificateReference": "Azure.Batch.BatchCertificateReference", + "azure.batch.models.BatchError": "Azure.Batch.BatchError", + "azure.batch.models.BatchErrorDetail": "Azure.Batch.BatchErrorDetail", + "azure.batch.models.BatchErrorMessage": "Azure.Batch.BatchErrorMessage", + "azure.batch.models.BatchJob": "Azure.Batch.BatchJob", + "azure.batch.models.BatchJobConstraints": "Azure.Batch.BatchJobConstraints", + "azure.batch.models.BatchJobCreateContent": "Azure.Batch.BatchJobCreateContent", + "azure.batch.models.BatchJobDisableContent": "Azure.Batch.BatchJobDisableContent", + "azure.batch.models.BatchJobExecutionInfo": "Azure.Batch.BatchJobExecutionInfo", + "azure.batch.models.BatchJobManagerTask": "Azure.Batch.BatchJobManagerTask", + "azure.batch.models.BatchJobNetworkConfiguration": "Azure.Batch.BatchJobNetworkConfiguration", + "azure.batch.models.BatchJobPreparationAndReleaseTaskStatus": "Azure.Batch.BatchJobPreparationAndReleaseTaskStatus", + "azure.batch.models.BatchJobPreparationTask": "Azure.Batch.BatchJobPreparationTask", + "azure.batch.models.BatchJobPreparationTaskExecutionInfo": "Azure.Batch.BatchJobPreparationTaskExecutionInfo", + "azure.batch.models.BatchJobReleaseTask": "Azure.Batch.BatchJobReleaseTask", + "azure.batch.models.BatchJobReleaseTaskExecutionInfo": "Azure.Batch.BatchJobReleaseTaskExecutionInfo", + "azure.batch.models.BatchJobSchedule": "Azure.Batch.BatchJobSchedule", + "azure.batch.models.BatchJobScheduleConfiguration": "Azure.Batch.BatchJobScheduleConfiguration", + "azure.batch.models.BatchJobScheduleCreateContent": "Azure.Batch.BatchJobScheduleCreateContent", + "azure.batch.models.BatchJobScheduleExecutionInfo": "Azure.Batch.BatchJobScheduleExecutionInfo", + "azure.batch.models.BatchJobScheduleStatistics": "Azure.Batch.BatchJobScheduleStatistics", + "azure.batch.models.BatchJobScheduleUpdateContent": "Azure.Batch.BatchJobScheduleUpdateContent", + "azure.batch.models.BatchJobSchedulingError": "Azure.Batch.BatchJobSchedulingError", + "azure.batch.models.BatchJobSpecification": "Azure.Batch.BatchJobSpecification", + "azure.batch.models.BatchJobStatistics": "Azure.Batch.BatchJobStatistics", + "azure.batch.models.BatchJobTerminateContent": "Azure.Batch.BatchJobTerminateContent", + "azure.batch.models.BatchJobUpdateContent": "Azure.Batch.BatchJobUpdateContent", + "azure.batch.models.BatchNode": "Azure.Batch.BatchNode", + "azure.batch.models.BatchNodeAgentInfo": "Azure.Batch.BatchNodeAgentInfo", + "azure.batch.models.BatchNodeCounts": "Azure.Batch.BatchNodeCounts", + "azure.batch.models.BatchNodeDeallocateContent": "Azure.Batch.BatchNodeDeallocateContent", + "azure.batch.models.BatchNodeDisableSchedulingContent": "Azure.Batch.BatchNodeDisableSchedulingContent", + "azure.batch.models.BatchNodeEndpointConfiguration": "Azure.Batch.BatchNodeEndpointConfiguration", + "azure.batch.models.BatchNodeError": "Azure.Batch.BatchNodeError", + "azure.batch.models.BatchNodeFile": "Azure.Batch.BatchNodeFile", + "azure.batch.models.BatchNodeIdentityReference": "Azure.Batch.BatchNodeIdentityReference", + "azure.batch.models.BatchNodeInfo": "Azure.Batch.BatchNodeInfo", + "azure.batch.models.BatchNodePlacementConfiguration": "Azure.Batch.BatchNodePlacementConfiguration", + "azure.batch.models.BatchNodeRebootContent": "Azure.Batch.BatchNodeRebootContent", + "azure.batch.models.BatchNodeReimageContent": "Azure.Batch.BatchNodeReimageContent", + "azure.batch.models.BatchNodeRemoteLoginSettings": 
"Azure.Batch.BatchNodeRemoteLoginSettings", + "azure.batch.models.BatchNodeRemoveContent": "Azure.Batch.BatchNodeRemoveContent", + "azure.batch.models.BatchNodeUserCreateContent": "Azure.Batch.BatchNodeUserCreateContent", + "azure.batch.models.BatchNodeUserUpdateContent": "Azure.Batch.BatchNodeUserUpdateContent", + "azure.batch.models.BatchNodeVMExtension": "Azure.Batch.BatchNodeVMExtension", + "azure.batch.models.BatchPool": "Azure.Batch.BatchPool", + "azure.batch.models.BatchPoolCreateContent": "Azure.Batch.BatchPoolCreateContent", + "azure.batch.models.BatchPoolEnableAutoScaleContent": "Azure.Batch.BatchPoolEnableAutoScaleContent", + "azure.batch.models.BatchPoolEndpointConfiguration": "Azure.Batch.BatchPoolEndpointConfiguration", + "azure.batch.models.BatchPoolEvaluateAutoScaleContent": "Azure.Batch.BatchPoolEvaluateAutoScaleContent", + "azure.batch.models.BatchPoolIdentity": "Azure.Batch.BatchPoolIdentity", + "azure.batch.models.BatchPoolInfo": "Azure.Batch.BatchPoolInfo", + "azure.batch.models.BatchPoolNodeCounts": "Azure.Batch.BatchPoolNodeCounts", + "azure.batch.models.BatchPoolReplaceContent": "Azure.Batch.BatchPoolReplaceContent", + "azure.batch.models.BatchPoolResizeContent": "Azure.Batch.BatchPoolResizeContent", + "azure.batch.models.BatchPoolResourceStatistics": "Azure.Batch.BatchPoolResourceStatistics", + "azure.batch.models.BatchPoolSpecification": "Azure.Batch.BatchPoolSpecification", + "azure.batch.models.BatchPoolStatistics": "Azure.Batch.BatchPoolStatistics", + "azure.batch.models.BatchPoolUpdateContent": "Azure.Batch.BatchPoolUpdateContent", + "azure.batch.models.BatchPoolUsageMetrics": "Azure.Batch.BatchPoolUsageMetrics", + "azure.batch.models.BatchPoolUsageStatistics": "Azure.Batch.BatchPoolUsageStatistics", + "azure.batch.models.BatchStartTask": "Azure.Batch.BatchStartTask", + "azure.batch.models.BatchStartTaskInfo": "Azure.Batch.BatchStartTaskInfo", + "azure.batch.models.BatchSubtask": "Azure.Batch.BatchSubtask", + "azure.batch.models.BatchSupportedImage": "Azure.Batch.BatchSupportedImage", + "azure.batch.models.BatchTask": "Azure.Batch.BatchTask", + "azure.batch.models.BatchTaskAddCollectionResult": "Azure.Batch.BatchTaskAddCollectionResult", + "azure.batch.models.BatchTaskAddResult": "Azure.Batch.BatchTaskAddResult", + "azure.batch.models.BatchTaskConstraints": "Azure.Batch.BatchTaskConstraints", + "azure.batch.models.BatchTaskContainerExecutionInfo": "Azure.Batch.BatchTaskContainerExecutionInfo", + "azure.batch.models.BatchTaskContainerSettings": "Azure.Batch.BatchTaskContainerSettings", + "azure.batch.models.BatchTaskCounts": "Azure.Batch.BatchTaskCounts", + "azure.batch.models.BatchTaskCountsResult": "Azure.Batch.BatchTaskCountsResult", + "azure.batch.models.BatchTaskCreateContent": "Azure.Batch.BatchTaskCreateContent", + "azure.batch.models.BatchTaskDependencies": "Azure.Batch.BatchTaskDependencies", + "azure.batch.models.BatchTaskExecutionInfo": "Azure.Batch.BatchTaskExecutionInfo", + "azure.batch.models.BatchTaskFailureInfo": "Azure.Batch.BatchTaskFailureInfo", + "azure.batch.models.BatchTaskGroup": "Azure.Batch.BatchTaskGroup", + "azure.batch.models.BatchTaskIdRange": "Azure.Batch.BatchTaskIdRange", + "azure.batch.models.BatchTaskInfo": "Azure.Batch.BatchTaskInfo", + "azure.batch.models.BatchTaskSchedulingPolicy": "Azure.Batch.BatchTaskSchedulingPolicy", + "azure.batch.models.BatchTaskSlotCounts": "Azure.Batch.BatchTaskSlotCounts", + "azure.batch.models.BatchTaskStatistics": "Azure.Batch.BatchTaskStatistics", + "azure.batch.models.CifsMountConfiguration": 
"Azure.Batch.CifsMountConfiguration", + "azure.batch.models.ContainerConfiguration": "Azure.Batch.ContainerConfiguration", + "azure.batch.models.ContainerHostBatchBindMountEntry": "Azure.Batch.ContainerHostBatchBindMountEntry", + "azure.batch.models.ContainerRegistryReference": "Azure.Batch.ContainerRegistryReference", + "azure.batch.models.DataDisk": "Azure.Batch.DataDisk", + "azure.batch.models.DeleteBatchCertificateError": "Azure.Batch.DeleteBatchCertificateError", + "azure.batch.models.DiffDiskSettings": "Azure.Batch.DiffDiskSettings", + "azure.batch.models.DiskEncryptionConfiguration": "Azure.Batch.DiskEncryptionConfiguration", + "azure.batch.models.EnvironmentSetting": "Azure.Batch.EnvironmentSetting", + "azure.batch.models.ExitCodeMapping": "Azure.Batch.ExitCodeMapping", + "azure.batch.models.ExitCodeRangeMapping": "Azure.Batch.ExitCodeRangeMapping", + "azure.batch.models.ExitConditions": "Azure.Batch.ExitConditions", + "azure.batch.models.ExitOptions": "Azure.Batch.ExitOptions", + "azure.batch.models.FileProperties": "Azure.Batch.FileProperties", + "azure.batch.models.HttpHeader": "Azure.Batch.HttpHeader", + "azure.batch.models.ImageReference": "Azure.Batch.ImageReference", + "azure.batch.models.InboundEndpoint": "Azure.Batch.InboundEndpoint", + "azure.batch.models.InboundNatPool": "Azure.Batch.InboundNatPool", + "azure.batch.models.InstanceViewStatus": "Azure.Batch.InstanceViewStatus", + "azure.batch.models.LinuxUserConfiguration": "Azure.Batch.LinuxUserConfiguration", + "azure.batch.models.ManagedDisk": "Azure.Batch.ManagedDisk", + "azure.batch.models.MetadataItem": "Azure.Batch.MetadataItem", + "azure.batch.models.MountConfiguration": "Azure.Batch.MountConfiguration", + "azure.batch.models.MultiInstanceSettings": "Azure.Batch.MultiInstanceSettings", + "azure.batch.models.NameValuePair": "Azure.Batch.NameValuePair", + "azure.batch.models.NetworkConfiguration": "Azure.Batch.NetworkConfiguration", + "azure.batch.models.NetworkSecurityGroupRule": "Azure.Batch.NetworkSecurityGroupRule", + "azure.batch.models.NfsMountConfiguration": "Azure.Batch.NfsMountConfiguration", + "azure.batch.models.OSDisk": "Azure.Batch.OSDisk", + "azure.batch.models.OutputFile": "Azure.Batch.OutputFile", + "azure.batch.models.OutputFileBlobContainerDestination": "Azure.Batch.OutputFileBlobContainerDestination", + "azure.batch.models.OutputFileDestination": "Azure.Batch.OutputFileDestination", + "azure.batch.models.OutputFileUploadConfig": "Azure.Batch.OutputFileUploadConfig", + "azure.batch.models.PublicIpAddressConfiguration": "Azure.Batch.PublicIpAddressConfiguration", + "azure.batch.models.RecentBatchJob": "Azure.Batch.RecentBatchJob", + "azure.batch.models.ResizeError": "Azure.Batch.ResizeError", + "azure.batch.models.ResourceFile": "Azure.Batch.ResourceFile", + "azure.batch.models.RollingUpgradePolicy": "Azure.Batch.RollingUpgradePolicy", + "azure.batch.models.SecurityProfile": "Azure.Batch.SecurityProfile", + "azure.batch.models.ServiceArtifactReference": "Azure.Batch.ServiceArtifactReference", + "azure.batch.models.UefiSettings": "Azure.Batch.UefiSettings", + "azure.batch.models.UpgradePolicy": "Azure.Batch.UpgradePolicy", + "azure.batch.models.UploadBatchServiceLogsContent": "Azure.Batch.UploadBatchServiceLogsContent", + "azure.batch.models.UploadBatchServiceLogsResult": "Azure.Batch.UploadBatchServiceLogsResult", + "azure.batch.models.UserAccount": "Azure.Batch.UserAccount", + "azure.batch.models.UserAssignedIdentity": "Azure.Batch.UserAssignedIdentity", + "azure.batch.models.UserIdentity": 
"Azure.Batch.UserIdentity", + "azure.batch.models.VirtualMachineConfiguration": "Azure.Batch.VirtualMachineConfiguration", + "azure.batch.models.VirtualMachineInfo": "Azure.Batch.VirtualMachineInfo", + "azure.batch.models.VMDiskSecurityProfile": "Azure.Batch.VMDiskSecurityProfile", + "azure.batch.models.VMExtension": "Azure.Batch.VMExtension", + "azure.batch.models.VMExtensionInstanceView": "Azure.Batch.VMExtensionInstanceView", + "azure.batch.models.WindowsConfiguration": "Azure.Batch.WindowsConfiguration", + "azure.batch.models.WindowsUserConfiguration": "Azure.Batch.WindowsUserConfiguration", + "azure.batch.models.CachingType": "Azure.Batch.CachingType", + "azure.batch.models.StorageAccountType": "Azure.Batch.StorageAccountType", + "azure.batch.models.ContainerType": "Azure.Batch.ContainerType", + "azure.batch.models.DiskEncryptionTarget": "Azure.Batch.DiskEncryptionTarget", + "azure.batch.models.BatchNodePlacementPolicyType": "Azure.Batch.BatchNodePlacementPolicyType", + "azure.batch.models.DiffDiskPlacement": "Azure.Batch.DiffDiskPlacement", + "azure.batch.models.SecurityEncryptionTypes": "Azure.Batch.SecurityEncryptionTypes", + "azure.batch.models.SecurityTypes": "Azure.Batch.SecurityTypes", + "azure.batch.models.DynamicVNetAssignmentScope": "Azure.Batch.DynamicVNetAssignmentScope", + "azure.batch.models.InboundEndpointProtocol": "Azure.Batch.InboundEndpointProtocol", + "azure.batch.models.NetworkSecurityGroupRuleAccess": "Azure.Batch.NetworkSecurityGroupRuleAccess", + "azure.batch.models.IpAddressProvisioningType": "Azure.Batch.IpAddressProvisioningType", + "azure.batch.models.ContainerWorkingDirectory": "Azure.Batch.ContainerWorkingDirectory", + "azure.batch.models.ContainerHostDataPath": "Azure.Batch.ContainerHostDataPath", + "azure.batch.models.AutoUserScope": "Azure.Batch.AutoUserScope", + "azure.batch.models.ElevationLevel": "Azure.Batch.ElevationLevel", + "azure.batch.models.BatchCertificateStoreLocation": "Azure.Batch.BatchCertificateStoreLocation", + "azure.batch.models.BatchCertificateVisibility": "Azure.Batch.BatchCertificateVisibility", + "azure.batch.models.BatchNodeFillType": "Azure.Batch.BatchNodeFillType", + "azure.batch.models.LoginMode": "Azure.Batch.LoginMode", + "azure.batch.models.BatchNodeCommunicationMode": "Azure.Batch.BatchNodeCommunicationMode", + "azure.batch.models.UpgradeMode": "Azure.Batch.UpgradeMode", + "azure.batch.models.BatchPoolState": "Azure.Batch.BatchPoolState", + "azure.batch.models.AllocationState": "Azure.Batch.AllocationState", + "azure.batch.models.BatchPoolIdentityType": "Azure.Batch.BatchPoolIdentityType", + "azure.batch.models.BatchNodeDeallocationOption": "Azure.Batch.BatchNodeDeallocationOption", + "azure.batch.models.OSType": "Azure.Batch.OSType", + "azure.batch.models.ImageVerificationType": "Azure.Batch.ImageVerificationType", + "azure.batch.models.BatchJobState": "Azure.Batch.BatchJobState", + "azure.batch.models.OutputFileUploadCondition": "Azure.Batch.OutputFileUploadCondition", + "azure.batch.models.AccessScope": "Azure.Batch.AccessScope", + "azure.batch.models.BatchPoolLifetimeOption": "Azure.Batch.BatchPoolLifetimeOption", + "azure.batch.models.OnAllBatchTasksComplete": "Azure.Batch.OnAllBatchTasksComplete", + "azure.batch.models.OnBatchTaskFailure": "Azure.Batch.OnBatchTaskFailure", + "azure.batch.models.ErrorCategory": "Azure.Batch.ErrorCategory", + "azure.batch.models.DisableBatchJobOption": "Azure.Batch.DisableBatchJobOption", + "azure.batch.models.BatchJobPreparationTaskState": "Azure.Batch.BatchJobPreparationTaskState", 
+ "azure.batch.models.BatchTaskExecutionResult": "Azure.Batch.BatchTaskExecutionResult", + "azure.batch.models.BatchJobReleaseTaskState": "Azure.Batch.BatchJobReleaseTaskState", + "azure.batch.models.BatchCertificateState": "Azure.Batch.BatchCertificateState", + "azure.batch.models.BatchCertificateFormat": "Azure.Batch.BatchCertificateFormat", + "azure.batch.models.BatchJobScheduleState": "Azure.Batch.BatchJobScheduleState", + "azure.batch.models.BatchJobAction": "Azure.Batch.BatchJobAction", + "azure.batch.models.DependencyAction": "Azure.Batch.DependencyAction", + "azure.batch.models.BatchTaskState": "Azure.Batch.BatchTaskState", + "azure.batch.models.BatchTaskAddStatus": "Azure.Batch.BatchTaskAddStatus", + "azure.batch.models.BatchSubtaskState": "Azure.Batch.BatchSubtaskState", + "azure.batch.models.BatchNodeState": "Azure.Batch.BatchNodeState", + "azure.batch.models.SchedulingState": "Azure.Batch.SchedulingState", + "azure.batch.models.BatchStartTaskState": "Azure.Batch.BatchStartTaskState", + "azure.batch.models.BatchNodeRebootOption": "Azure.Batch.BatchNodeRebootOption", + "azure.batch.models.BatchNodeReimageOption": "Azure.Batch.BatchNodeReimageOption", + "azure.batch.models.BatchNodeDeallocateOption": "Azure.Batch.BatchNodeDeallocateOption", + "azure.batch.models.BatchNodeDisableSchedulingOption": "Azure.Batch.BatchNodeDisableSchedulingOption", + "azure.batch.models.StatusLevelTypes": "Azure.Batch.StatusLevelTypes", + "azure.batch.BatchClient.list_applications": "Client.BatchClient.listApplications", + "azure.batch.BatchClient.get_application": "Client.BatchClient.getApplication", + "azure.batch.BatchClient.list_pool_usage_metrics": "Client.BatchClient.listPoolUsageMetrics", + "azure.batch.BatchClient.create_pool": "Client.BatchClient.createPool", + "azure.batch.BatchClient.list_pools": "Client.BatchClient.listPools", + "azure.batch.BatchClient.delete_pool": "Client.BatchClient.deletePool", + "azure.batch.BatchClient.pool_exists": "Client.BatchClient.poolExists", + "azure.batch.BatchClient.get_pool": "Client.BatchClient.getPool", + "azure.batch.BatchClient.update_pool": "Client.BatchClient.updatePool", + "azure.batch.BatchClient.disable_pool_auto_scale": "Client.BatchClient.disablePoolAutoScale", + "azure.batch.BatchClient.enable_pool_auto_scale": "Client.BatchClient.enablePoolAutoScale", + "azure.batch.BatchClient.evaluate_pool_auto_scale": "Client.BatchClient.evaluatePoolAutoScale", + "azure.batch.BatchClient.resize_pool": "Client.BatchClient.resizePool", + "azure.batch.BatchClient.stop_pool_resize": "Client.BatchClient.stopPoolResize", + "azure.batch.BatchClient.replace_pool_properties": "Client.BatchClient.replacePoolProperties", + "azure.batch.BatchClient.remove_nodes": "Client.BatchClient.removeNodes", + "azure.batch.BatchClient.list_supported_images": "Client.BatchClient.listSupportedImages", + "azure.batch.BatchClient.list_pool_node_counts": "Client.BatchClient.listPoolNodeCounts", + "azure.batch.BatchClient.delete_job": "Client.BatchClient.deleteJob", + "azure.batch.BatchClient.get_job": "Client.BatchClient.getJob", + "azure.batch.BatchClient.update_job": "Client.BatchClient.updateJob", + "azure.batch.BatchClient.replace_job": "Client.BatchClient.replaceJob", + "azure.batch.BatchClient.disable_job": "Client.BatchClient.disableJob", + "azure.batch.BatchClient.enable_job": "Client.BatchClient.enableJob", + "azure.batch.BatchClient.terminate_job": "Client.BatchClient.terminateJob", + "azure.batch.BatchClient.create_job": "Client.BatchClient.createJob", + 
"azure.batch.BatchClient.list_jobs": "Client.BatchClient.listJobs", + "azure.batch.BatchClient.list_jobs_from_schedule": "Client.BatchClient.listJobsFromSchedule", + "azure.batch.BatchClient.list_job_preparation_and_release_task_status": "Client.BatchClient.listJobPreparationAndReleaseTaskStatus", + "azure.batch.BatchClient.get_job_task_counts": "Client.BatchClient.getJobTaskCounts", + "azure.batch.BatchClient.create_certificate": "Client.BatchClient.createCertificate", + "azure.batch.BatchClient.list_certificates": "Client.BatchClient.listCertificates", + "azure.batch.BatchClient.cancel_certificate_deletion": "Client.BatchClient.cancelCertificateDeletion", + "azure.batch.BatchClient.delete_certificate": "Client.BatchClient.deleteCertificate", + "azure.batch.BatchClient.get_certificate": "Client.BatchClient.getCertificate", + "azure.batch.BatchClient.job_schedule_exists": "Client.BatchClient.jobScheduleExists", + "azure.batch.BatchClient.delete_job_schedule": "Client.BatchClient.deleteJobSchedule", + "azure.batch.BatchClient.get_job_schedule": "Client.BatchClient.getJobSchedule", + "azure.batch.BatchClient.update_job_schedule": "Client.BatchClient.updateJobSchedule", + "azure.batch.BatchClient.replace_job_schedule": "Client.BatchClient.replaceJobSchedule", + "azure.batch.BatchClient.disable_job_schedule": "Client.BatchClient.disableJobSchedule", + "azure.batch.BatchClient.enable_job_schedule": "Client.BatchClient.enableJobSchedule", + "azure.batch.BatchClient.terminate_job_schedule": "Client.BatchClient.terminateJobSchedule", + "azure.batch.BatchClient.create_job_schedule": "Client.BatchClient.createJobSchedule", + "azure.batch.BatchClient.list_job_schedules": "Client.BatchClient.listJobSchedules", + "azure.batch.BatchClient.create_task": "Client.BatchClient.createTask", + "azure.batch.BatchClient.list_tasks": "Client.BatchClient.listTasks", + "azure.batch.BatchClient.create_task_collection": "Client.BatchClient.createTaskCollection", + "azure.batch.BatchClient.delete_task": "Client.BatchClient.deleteTask", + "azure.batch.BatchClient.get_task": "Client.BatchClient.getTask", + "azure.batch.BatchClient.replace_task": "Client.BatchClient.replaceTask", + "azure.batch.BatchClient.list_sub_tasks": "Client.BatchClient.listSubTasks", + "azure.batch.BatchClient.terminate_task": "Client.BatchClient.terminateTask", + "azure.batch.BatchClient.reactivate_task": "Client.BatchClient.reactivateTask", + "azure.batch.BatchClient.delete_task_file": "Client.BatchClient.deleteTaskFile", + "azure.batch.BatchClient.get_task_file": "Client.BatchClient.getTaskFile", + "azure.batch.BatchClient.get_task_file_properties": "Client.BatchClient.getTaskFileProperties", + "azure.batch.BatchClient.list_task_files": "Client.BatchClient.listTaskFiles", + "azure.batch.BatchClient.create_node_user": "Client.BatchClient.createNodeUser", + "azure.batch.BatchClient.delete_node_user": "Client.BatchClient.deleteNodeUser", + "azure.batch.BatchClient.replace_node_user": "Client.BatchClient.replaceNodeUser", + "azure.batch.BatchClient.get_node": "Client.BatchClient.getNode", + "azure.batch.BatchClient.reboot_node": "Client.BatchClient.rebootNode", + "azure.batch.BatchClient.start_node": "Client.BatchClient.startNode", + "azure.batch.BatchClient.reimage_node": "Client.BatchClient.reimageNode", + "azure.batch.BatchClient.deallocate_node": "Client.BatchClient.deallocateNode", + "azure.batch.BatchClient.disable_node_scheduling": "Client.BatchClient.disableNodeScheduling", + "azure.batch.BatchClient.enable_node_scheduling": 
"Client.BatchClient.enableNodeScheduling", + "azure.batch.BatchClient.get_node_remote_login_settings": "Client.BatchClient.getNodeRemoteLoginSettings", + "azure.batch.BatchClient.upload_node_logs": "Client.BatchClient.uploadNodeLogs", + "azure.batch.BatchClient.list_nodes": "Client.BatchClient.listNodes", + "azure.batch.BatchClient.get_node_extension": "Client.BatchClient.getNodeExtension", + "azure.batch.BatchClient.list_node_extensions": "Client.BatchClient.listNodeExtensions", + "azure.batch.BatchClient.delete_node_file": "Client.BatchClient.deleteNodeFile", + "azure.batch.BatchClient.get_node_file": "Client.BatchClient.getNodeFile", + "azure.batch.BatchClient.get_node_file_properties": "Client.BatchClient.getNodeFileProperties", + "azure.batch.BatchClient.list_node_files": "Client.BatchClient.listNodeFiles" + } +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/azure/batch/_model_base.py b/sdk/batch/azure-batch/azure/batch/_model_base.py index 3072ee252ed9..065b17f67c46 100644 --- a/sdk/batch/azure-batch/azure/batch/_model_base.py +++ b/sdk/batch/azure-batch/azure/batch/_model_base.py @@ -2,8 +2,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=protected-access, broad-except diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py index 14c180e935e8..44dcbca2db30 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py @@ -478,11 +478,11 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -560,11 +560,11 @@ def build_batch_resize_pool_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", 
accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -690,11 +690,11 @@ def build_batch_remove_nodes_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -1013,11 +1013,11 @@ def build_batch_disable_job_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -1114,11 +1114,11 @@ def build_batch_terminate_job_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -3387,7 +3387,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -3610,7 +3610,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -3814,7 +3814,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + 
list_of_elem = _deserialize(List[_models.BatchPool], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -5101,7 +5101,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -5214,7 +5214,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6249,7 +6249,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6375,7 +6375,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6503,7 +6503,9 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6785,7 +6787,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6991,7 +6993,7 @@ def get_certificate( ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any - ) -> _models.GetCertificateResponse: + ) -> _models.BatchCertificate: """Gets information about the specified Certificate. :param thumbprint_algorithm: The algorithm used to derive the thumbprint parameter. This must @@ -7009,8 +7011,8 @@ def get_certificate( :paramtype ocpdate: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] - :return: GetCertificateResponse. 
The GetCertificateResponse is compatible with MutableMapping - :rtype: ~azure.batch.models.GetCertificateResponse + :return: BatchCertificate. The BatchCertificate is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchCertificate :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -7024,7 +7026,7 @@ def get_certificate( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetCertificateResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCertificate] = kwargs.pop("cls", None) _request = build_batch_get_certificate_request( thumbprint_algorithm=thumbprint_algorithm, @@ -7067,7 +7069,7 @@ def get_certificate( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetCertificateResponse, response.json()) + deserialized = _deserialize(_models.BatchCertificate, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -8143,7 +8145,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -8357,7 +8359,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchTask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -8936,7 +8938,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -9389,10 +9391,8 @@ def get_task_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace - def _get_task_file_properties_internal( + def get_task_file_properties( self, job_id: str, task_id: str, @@ -9597,7 +9597,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -10811,7 +10811,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNode], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -11028,7 +11028,7 @@ 
def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -11256,10 +11256,8 @@ def get_node_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace - def _get_node_file_properties_internal( + def get_node_file_properties( self, pool_id: str, node_id: str, @@ -11462,7 +11460,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py index 4e0857b30791..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py @@ -1,391 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -from typing import Any, Deque, List, Optional, Iterable, Iterator, overload -import collections -import logging -import threading +from typing import List -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.rest import HttpResponse -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - # create_task_collection renamed - @distributed_trace - def create_tasks( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. 
If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrencies: number of threads to use in parallel when adding tasks. If specified - and greater than 0, will start additional threads to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrencies: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) - - # deque operations(append/pop) are thread-safe - results_queue: Deque[_models.BatchTaskAddResult] = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - self, job_id=job_id, task_collection=task_collection, **kwargs - ) - - # multi-threaded behavior - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - active_threads = [] - for i in range(concurrencies): - active_threads.append( - threading.Thread( - target=task_workflow_manager.task_collection_thread_handler, - args=(results_queue,), - ) - ) - active_threads[-1].start() - for thread in active_threads: - thread.join() - # single-threaded behavior - else: - task_workflow_manager.task_collection_thread_handler(results_queue) - - # Only define error if all threads have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - @distributed_trace - def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> Iterator[bytes]: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. 
You - will have to context manage the returned stream. - :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_node_file(*args, **kwargs) - - @distributed_trace - def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_node_file_properties_internal( # type: ignore - pool_id, - node_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_task_file_properties_internal( # type: ignore - job_id, - task_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> Iterator[bytes]: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_task_file(*args, **kwargs) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -395,174 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -class _TaskWorkflowManager: - """Worker class for one create_task_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`BatchTaskAddCollectionResult - ` - """ - - def __init__( - self, - batch_client: BatchClientOperationsMixin, - job_id: str, - task_collection: Iterable[_models.BatchTaskCreateContent], - **kwargs - ): - # Append operations thread safe - Only read once all threads have completed - # List of tasks which failed to add due to a returned client error - self.failure_tasks: Deque[_models.BatchTaskAddResult] = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors: Deque[Any] = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - self._error_lock = threading.Lock() - self._max_tasks_lock = threading.Lock() - self._pending_queue_lock = threading.Lock() - - # Variables to be used for task create_task_collection requests - self._batch_client = batch_client - self._job_id = job_id - - self._kwargs = kwargs - - def _bulk_add_tasks(self, results_queue, chunk_tasks_to_add): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~azure.batch.models.BatchTaskCreateContent] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - self._batch_client.create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskGroup(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is malformed and cannot be added because: - # 1) The task exceeds the max message size, - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) The sum of all cells exceeds the max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: tasks are relatively close in size, so if one batch exceeds the size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to the base case above - midpoint = int(len(chunk_tasks_to_add) / 2) - # Restrict one thread at a time to do this compare and set, - # therefore forcing max_tasks_per_request to be strictly decreasing - with self._max_tasks_lock: - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal is to handle this - # exception and work in all cases where tasks are well behaved. - # The behavior retries as a smaller chunk and - # appends the extra tasks to the queue to be picked up by another thread.
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - try: - create_task_collection_response = create_task_collection_response.output - except AttributeError: - pass - if create_task_collection_response.value: - for task_result in create_task_collection_response.value: # pylint: disable=no-member - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - with self._pending_queue_lock: - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not (task_result.error and task_result.error.code == "TaskExists") - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - def task_collection_thread_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - with self._pending_queue_lock: - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - self._bulk_add_tasks(results_queue, chunk_tasks_to_add) - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/_patch.py b/sdk/batch/azure-batch/azure/batch/_patch.py index a9f1f6eeca2a..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_patch.py @@ -1,162 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
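# The retry strategy in the removed _TaskWorkflowManager boils down to: submit a chunk;
# on RequestBodyTooLarge split it in half, requeue the tail, and retry with a strictly
# decreasing per-request cap. A standalone toy of that control flow -- submit() and
# TooLarge are hypothetical stand-ins for create_task_collection and the service error.
import collections

class TooLarge(Exception):
    pass

def submit(chunk):
    if len(chunk) > 3:  # pretend the service rejects chunks over 3 tasks
        raise TooLarge()

def add_all(tasks):
    pending = collections.deque(tasks)
    max_per_request = 100
    while pending:
        chunk = [pending.pop() for _ in range(min(max_per_request, len(pending)))]
        try:
            submit(chunk)
        except TooLarge:
            if len(chunk) == 1:
                raise  # a single task is simply too big; surface the error
            midpoint = len(chunk) // 2
            max_per_request = midpoint  # strictly decreasing; midpoint >= 1
            pending.extendleft(chunk[midpoint:])  # requeue the tail
            pending.extendleft(chunk[:midpoint])  # retry the head next

add_all(range(10))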
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import base64 -import hmac -import hashlib -import importlib -from datetime import datetime -from typing import TYPE_CHECKING, TypeVar, Any, Union +from typing import List -from azure.core.pipeline.policies import SansIOHTTPPolicy -from azure.core.credentials import AzureNamedKeyCredential, TokenCredential -from azure.core.pipeline import PipelineResponse, PipelineRequest -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest - -from ._client import BatchClient as GenerateBatchClient -from ._serialization import ( - Serializer, - TZ_UTC, -) - -try: - from urlparse import urlparse, parse_qs -except ImportError: - from urllib.parse import urlparse, parse_qs -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Optional, TypeVar, Union - - from azure.core.credentials import TokenCredential - from azure.core.pipeline import PipelineRequest - - ClientType = TypeVar("ClientType", bound="BatchClient") - T = TypeVar("T") - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - - -class BatchSharedKeyAuthPolicy(SansIOHTTPPolicy): - - headers_to_sign = [ - "content-encoding", - "content-language", - "content-length", - "content-md5", - "content-type", - "date", - "if-modified-since", - "if-match", - "if-none-match", - "if-unmodified-since", - "range", - ] - - def __init__(self, credential: AzureNamedKeyCredential): - super(BatchSharedKeyAuthPolicy, self).__init__() - self._account_name = credential.named_key.name - self._key = credential.named_key.key - - def on_request(self, request: PipelineRequest): - if not request.http_request.headers.get("ocp-date"): - now = datetime.utcnow() - now = now.replace(tzinfo=TZ_UTC) - request.http_request.headers["ocp-date"] = Serializer.serialize_rfc(now) - url = urlparse(request.http_request.url) - uri_path = url.path - - # method to sign - string_to_sign = request.http_request.method + "\n" - - # get headers to sign - request_header_dict = {key.lower(): val for key, val in request.http_request.headers.items() if val} - - if request.http_request.method not in ["GET", "HEAD"]: - if "content-length" not in request_header_dict: - request_header_dict["content-length"] = "0" - - request_headers = [str(request_header_dict.get(x, "")) for x in self.headers_to_sign] - - string_to_sign += "\n".join(request_headers) + "\n" - - # get ocp- header to sign - ocp_headers = [] - for name, value in request.http_request.headers.items(): - if "ocp-" in name and value: - ocp_headers.append((name.lower(), value)) - for name, value in sorted(ocp_headers): - string_to_sign += "{}:{}\n".format(name, value) - # get account_name and uri path to sign - string_to_sign += "/{}{}".format(self._account_name, uri_path) - - # get query string to sign if it is not table service - query_to_sign = parse_qs(url.query) - - for name in sorted(query_to_sign.keys()): - value = query_to_sign[name][0] - if value: - string_to_sign += "\n{}:{}".format(name, value) - # sign the request - auth_string = "SharedKey {}:{}".format(self._account_name, self._sign_string(string_to_sign)) - - request.http_request.headers["Authorization"] = auth_string - - return super().on_request(request) - - def _sign_string(self, string_to_sign): - - _key = self._key.encode("utf-8") 
- string_to_sign = string_to_sign.encode("utf-8") - - try: - key = base64.b64decode(_key) - except TypeError: - raise ValueError("Invalid key value: {}".format(self._key)) - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - - return base64.b64encode(digest).decode("utf-8") - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Azure Batch account. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. - :type credential: ~azure.core.credentials.AzureNamedKeyCredential or - ~azure.core.credentials.TokenCredential - :keyword api_version: Api Version. Note that overriding the default value may result in - unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: Union[AzureNamedKeyCredential, TokenCredential], **kwargs): - super().__init__( - endpoint=endpoint, - credential=credential, # type: ignore - authentication_policy=kwargs.pop( - "authentication_policy", self._format_shared_key_credential("", credential) - ), - **kwargs - ) - - def _format_shared_key_credential(self, account_name, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/_serialization.py b/sdk/batch/azure-batch/azure/batch/_serialization.py index 7a0232de5ddc..eb86ea23c965 100644 --- a/sdk/batch/azure-batch/azure/batch/_serialization.py +++ b/sdk/batch/azure-batch/azure/batch/_serialization.py @@ -1,28 +1,10 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- -# # Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated.
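# The shared-key policy removed above canonicalizes the request into a string-to-sign,
# HMACs it with the base64-decoded account key, and emits an
# "Authorization: SharedKey {account}:{signature}" header. A minimal sketch of just the
# signing step, mirroring the removed _sign_string; the key and string below are
# made-up placeholders.
import base64
import hashlib
import hmac

def sign_string(key_b64: str, string_to_sign: str) -> str:
    key = base64.b64decode(key_b64)
    digest = hmac.new(key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
    return base64.b64encode(digest).decode("utf-8")

print("SharedKey myaccount:" + sign_string("bXlrZXk=", "GET\n\n\n0\n\n\n\n\n\n\n\n\n/myaccount/jobs"))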
# -------------------------------------------------------------------------- # pyright: reportUnnecessaryTypeIgnoreComment=false diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index 0068a9979d9d..be71c81bd282 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "15.0.0b2" +VERSION = "1.0.0b1" diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py index ddda2a1d449d..b0d936dcd73a 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py @@ -211,7 +211,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -434,7 +434,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -638,7 +638,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPool], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -1925,7 +1925,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -2038,7 +2038,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3073,7 +3073,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], 
deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3199,7 +3199,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3327,7 +3327,9 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3609,7 +3611,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3815,7 +3817,7 @@ async def get_certificate( ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any - ) -> _models.GetCertificateResponse: + ) -> _models.BatchCertificate: """Gets information about the specified Certificate. :param thumbprint_algorithm: The algorithm used to derive the thumbprint parameter. This must @@ -3833,8 +3835,8 @@ async def get_certificate( :paramtype ocpdate: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] - :return: GetCertificateResponse. The GetCertificateResponse is compatible with MutableMapping - :rtype: ~azure.batch.models.GetCertificateResponse + :return: BatchCertificate. 
The BatchCertificate is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchCertificate :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -3848,7 +3850,7 @@ async def get_certificate( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetCertificateResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCertificate] = kwargs.pop("cls", None) _request = build_batch_get_certificate_request( thumbprint_algorithm=thumbprint_algorithm, @@ -3891,7 +3893,7 @@ async def get_certificate( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetCertificateResponse, response.json()) + deserialized = _deserialize(_models.BatchCertificate, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -4967,7 +4969,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -5181,7 +5183,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchTask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -5760,7 +5762,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -6213,10 +6215,8 @@ async def get_task_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace_async - async def _get_task_file_properties_internal( + async def get_task_file_properties( self, job_id: str, task_id: str, @@ -6421,7 +6421,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -7635,7 +7635,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNode], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -7852,7 +7852,7 @@ def prepare_request(next_link=None): async def 
extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -8080,10 +8080,8 @@ async def get_node_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace_async - async def _get_node_file_properties_internal( + async def get_node_file_properties( self, pool_id: str, node_id: str, @@ -8286,7 +8284,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py index b3c12ac94cfb..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py @@ -1,523 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import datetime -import collections -import logging -from typing import Any, Deque, AsyncIterator, List, Iterable, Optional, Union +from typing import List -from azure.batch import models as _models -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - # create_task_collection renamed - @distributed_trace - async def create_tasks( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. 
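# The recurring hunks above harden paging: deserialized["value"] becomes
# deserialized.get("value", []), so a page without a "value" field yields an empty
# list instead of raising KeyError. A standalone sketch of the behavior being changed:
def extract_page(deserialized: dict):
    items = deserialized.get("value", [])  # tolerate a missing field
    next_link = deserialized.get("odata.nextLink") or None
    return next_link, items

assert extract_page({"value": [1, 2]}) == (None, [1, 2])
assert extract_page({}) == (None, [])  # previously raised KeyError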
If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: list[~azure.batch.models.BatchTaskCreateContent] - :param concurrencies: Number of coroutines to use in parallel when adding tasks. If specified - and greater than 0, additional coroutines will be started to submit requests and wait for them to finish. - Otherwise create_task_collection requests will be submitted sequentially on the main thread. - :type concurrencies: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult.
The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) - - results_queue: Deque[_models.BatchTaskAddResult] = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - self, job_id=job_id, task_collection=task_collection, **kwargs - ) - - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - coroutines = [] - for i in range(concurrencies): - coroutines.append(task_workflow_manager.task_collection_handler(results_queue)) - await asyncio.gather(*coroutines) - else: - await task_workflow_manager.task_collection_handler(results_queue) - - # Only define error if all coroutines have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - @distributed_trace - async def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
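# Hedged usage sketch for the removed async create_tasks customization above: with
# concurrencies > 0 it drains the shared pending queue from that many coroutines via
# asyncio.gather. The client, job ID, and task list are placeholders, not values from
# this patch.
import asyncio

async def add_many(client, job_id: str, tasks) -> None:
    # client is assumed to be azure.batch.aio.BatchClient with the create_tasks
    # customization applied; tasks is a list of models.BatchTaskCreateContent.
    result = await client.create_tasks(job_id, tasks, concurrencies=4)
    for task_result in result.value or []:
        print(task_result.status)

# asyncio.run(add_many(client, "my-job", tasks))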
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_node_file(*args, **kwargs) - - @distributed_trace - async def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_node_file_properties_internal( # type: ignore - pool_id, - node_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - async def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_task_file_properties_internal( # type: ignore - job_id, - task_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - async def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_task_file(*args, **kwargs) - - -class _TaskWorkflowManager: - """Worker class for one create_task_collection request - - :param str job_id: The ID of the job to which the task collection is to be - added. - :ivar tasks_to_add: The collection of tasks to add. - :vartype tasks_to_add: Iterable[~azure.batch.models.BatchTaskCreateContent] - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`TaskAddCollectionOptions - ` - """ - - def __init__( - self, - batch_client: BatchClientOperationsMixin, - job_id: str, - task_collection: Iterable[_models.BatchTaskCreateContent], - **kwargs - ): - # List of tasks which failed to add due to a returned client error - self.failure_tasks: Deque[_models.BatchTaskAddResult] = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors: Deque[Any] = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - - # Variables to be used for task create_task_collection requests - self._batch_client = batch_client - self._job_id = job_id - - self._kwargs = kwargs - - async def _bulk_add_tasks( - self, - results_queue: collections.deque, - chunk_tasks_to_add: List[_models.BatchTaskCreateContent], - ): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~azure.batch.models.BatchTaskCreateContent] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - await self._batch_client.create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskGroup(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is malformed and cannot be added because: - # 1) The task exceeds the max message size, - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) The sum of all cells exceeds the max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: tasks are relatively close in size, so if one batch exceeds the size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to the base case above - midpoint = int(len(chunk_tasks_to_add) / 2) - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal is to handle this - # exception and work in all cases where tasks are well behaved. - # The behavior retries as a smaller chunk and - # appends the extra tasks to the queue to be picked up by other coroutines.
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: # type: ignore - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - if create_task_collection_response.value: - for task_result in create_task_collection_response.value: - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not (task_result.error and task_result.error.code == "TaskExists") - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - async def task_collection_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -527,20 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/aio/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_patch.py index 64a3f1262c22..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_patch.py @@ -1,55 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
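# The worker loop removed above follows a simple pattern: while tasks remain and no
# fatal error has been recorded, pop a bounded chunk off the shared deque and submit
# it. A synchronous toy of that loop; process() is a hypothetical stand-in for
# _bulk_add_tasks.
import collections

def worker(pending, errors, max_per_request, process):
    while pending and not errors:
        chunk = []
        while len(chunk) < max_per_request and pending:
            chunk.append(pending.pop())
        if chunk:
            process(chunk)

results = []
worker(collections.deque(range(7)), collections.deque(), 3, results.append)
print(results)  # chunks of sizes 3, 3 and 1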
+# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import List -from ._client import BatchClient as GenerateBatchClient -from .._patch import BatchSharedKeyAuthPolicy -from azure.core.credentials import TokenCredential - - -from azure.core.credentials import AzureNamedKeyCredential - - -from typing import Union - -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Azure Batch account. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. - :type credential: ~azure.core.credentials.AzureNamedKeyCredential or - ~azure.core.credentials.TokenCredential - :keyword api_version: Api Version. Note that overriding the default value may result in - unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: Union[AzureNamedKeyCredential, TokenCredential], **kwargs): - super().__init__( - endpoint=endpoint, - credential=credential, # type: ignore - authentication_policy=kwargs.pop("authentication_policy", self._format_shared_key_credential(credential)), - **kwargs - ) - - def _format_shared_key_credential(self, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/models/__init__.py b/sdk/batch/azure-batch/azure/batch/models/__init__.py index c54aed6d5845..393ee8aedeb0 100644 --- a/sdk/batch/azure-batch/azure/batch/models/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/models/__init__.py @@ -123,7 +123,6 @@ ExitConditions, ExitOptions, FileProperties, - GetCertificateResponse, HttpHeader, ImageReference, InboundEndpoint, @@ -337,7 +336,6 @@ "ExitConditions", "ExitOptions", "FileProperties", - "GetCertificateResponse", "HttpHeader", "ImageReference", "InboundEndpoint", diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index 842decf5de5c..fb952c085549 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -9166,88 +9166,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class GetCertificateResponse(_model_base.Model): - """GetCertificateResponse. - - :ivar thumbprint: The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex - digits (it may include spaces but these are removed). Required. - :vartype thumbprint: str - :ivar thumbprint_algorithm: The algorithm used to derive the thumbprint. This must be sha1. - Required. - :vartype thumbprint_algorithm: str - :ivar url: The URL of the Certificate. - :vartype url: str - :ivar state: The state of the Certificate. Known values are: "active", "deleting", and - "deletefailed".
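# Migration note for the removal above: get_certificate now returns
# models.BatchCertificate rather than the dropped GetCertificateResponse, and the two
# shapes carry the same fields. A hedged sketch of calling code after the rename;
# client and thumbprint are placeholders.
def fetch_cert(client, thumbprint: str):
    # cert is azure.batch.models.BatchCertificate after this change.
    cert = client.get_certificate(thumbprint_algorithm="sha1", thumbprint=thumbprint)
    return cert.state, cert.public_data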
- :vartype state: str or ~azure.batch.models.BatchCertificateState - :ivar state_transition_time: The time at which the Certificate entered its current state. - :vartype state_transition_time: ~datetime.datetime - :ivar previous_state: The previous state of the Certificate. This property is not set if the - Certificate is in its initial active state. Known values are: "active", "deleting", and - "deletefailed". - :vartype previous_state: str or ~azure.batch.models.BatchCertificateState - :ivar previous_state_transition_time: The time at which the Certificate entered its previous - state. This property is not set if the Certificate is in its initial Active state. - :vartype previous_state_transition_time: ~datetime.datetime - :ivar public_data: The public part of the Certificate as a base-64 encoded .cer file. - :vartype public_data: str - :ivar delete_certificate_error: The error that occurred on the last attempt to delete this - Certificate. This property is set only if the Certificate is in the DeleteFailed state. - :vartype delete_certificate_error: ~azure.batch.models.DeleteBatchCertificateError - """ - - thumbprint: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may - include spaces but these are removed). Required.""" - thumbprint_algorithm: str = rest_field( - name="thumbprintAlgorithm", visibility=["read", "create", "update", "delete", "query"] - ) - """The algorithm used to derive the thumbprint. This must be sha1. Required.""" - url: Optional[str] = rest_field(visibility=["read"]) - """The URL of the Certificate.""" - state: Optional[Union[str, "_models.BatchCertificateState"]] = rest_field(visibility=["read"]) - """The state of the Certificate. Known values are: \"active\", \"deleting\", and \"deletefailed\".""" - state_transition_time: Optional[datetime.datetime] = rest_field( - name="stateTransitionTime", visibility=["read"], format="rfc3339" - ) - """The time at which the Certificate entered its current state.""" - previous_state: Optional[Union[str, "_models.BatchCertificateState"]] = rest_field( - name="previousState", visibility=["read"] - ) - """The previous state of the Certificate. This property is not set if the Certificate is in its - initial active state. Known values are: \"active\", \"deleting\", and \"deletefailed\".""" - previous_state_transition_time: Optional[datetime.datetime] = rest_field( - name="previousStateTransitionTime", visibility=["read"], format="rfc3339" - ) - """The time at which the Certificate entered its previous state. This property is not set if the - Certificate is in its initial Active state.""" - public_data: Optional[str] = rest_field(name="publicData", visibility=["read"]) - """The public part of the Certificate as a base-64 encoded .cer file.""" - delete_certificate_error: Optional["_models.DeleteBatchCertificateError"] = rest_field( - name="deleteCertificateError", visibility=["read"] - ) - """The error that occurred on the last attempt to delete this Certificate. This property is set - only if the Certificate is in the DeleteFailed state.""" - - @overload - def __init__( - self, - *, - thumbprint: str, - thumbprint_algorithm: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class HttpHeader(_model_base.Model): """An HTTP header name-value pair. diff --git a/sdk/batch/azure-batch/azure/batch/models/_patch.py b/sdk/batch/azure-batch/azure/batch/models/_patch.py index 6435ce16c022..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/models/_patch.py @@ -1,123 +1,16 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -from typing import List, Any, Optional +from typing import List -from azure.core.exceptions import HttpResponseError +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level -from ._models import BatchPoolReplaceContent as BatchPoolReplaceContentGenerated -from .._model_base import rest_field - -__all__: List[str] = [ - "CreateTasksError", - "BatchFileProperties", -] # Add all objects you want publicly available to users at this package level - -class CreateTasksError(HttpResponseError): - """Aggregate Exception containing details for any failures from a task add operation. - - :param str message: Error message describing exit reason - :param [~TaskAddParameter] pending_tasks: List of tasks remaining to be submitted. - :param [~TaskAddResult] failure_tasks: List of tasks which failed to add - :param [~Exception] errors: List of unknown errors forcing early termination - """ - - def __init__(self, pending_tasks=[], failure_tasks=[], errors=[]): - self.pending_tasks = pending_tasks - self.failure_tasks = failure_tasks - self.errors = errors - if failure_tasks and errors: - self.message = ( - "Multiple errors encountered. Check the `failure_tasks` and " - "`errors` properties for additional details." - ) - elif errors: - if len(errors) > 1: - self.message = ( - "Multiple errors occurred when submitting add_collection " - "requests. Check the `errors` property for the inner " - "exceptions." - ) - else: - self.message = str(errors[0]) - elif failure_tasks: - if len(failure_tasks) > 1: - self.message = ( - "Multiple client side errors occurred when adding the " - "tasks. Check the `failure_tasks` property for details on" - " these tasks." - ) - else: - result = failure_tasks[0] - self.message = "Task with id `%s` failed due to client error - %s::%s" % ( - result.task_id, - result.error.code, - result.error.message, - ) - super(CreateTasksError, self).__init__(self.message) - -class BatchFileProperties: - - """Information about a file or directory on a Compute Node with additional properties. - - :ivar url: The URL of the file. - :vartype url: str - :ivar is_directory: Whether the object represents a directory. - :vartype is_directory: bool - :ivar creation_time: The file creation time. The creation time is not returned for files on - Linux Compute Nodes. 
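# The removed CreateTasksError aggregated three collections: pending_tasks (never
# submitted), failure_tasks (client-side failures), and errors (unexpected
# exceptions). A hedged sketch of one way calling code could react, resubmitting only
# the tasks that never reached the service; the broad except is used because the
# exception type itself is removed by this patch, and all names are placeholders.
def add_with_retry(client, job_id: str, tasks):
    try:
        return client.create_tasks(job_id, tasks)
    except Exception as err:  # CreateTasksError in the removed customization
        pending = list(getattr(err, "pending_tasks", []))
        if pending:
            return client.create_tasks(job_id, pending)
        raise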
-    :vartype creation_time: ~datetime.datetime
-    :ivar last_modified: The time at which the file was last modified. Required.
-    :vartype last_modified: ~datetime.datetime
-    :ivar content_length: The length of the file. Required.
-    :vartype content_length: int
-    :ivar content_type: The content type of the file.
-    :vartype content_type: str
-    :ivar file_mode: The file mode attribute in octal format. The file mode is returned only for
-     files on Linux Compute Nodes.
-    :vartype file_mode: str
-    """
-
-    url: Optional[str]
-    """The URL of the file."""
-    is_directory: Optional[bool]
-    """Whether the object represents a directory."""
-    creation_time: Optional[datetime.datetime]
-    """The file creation time. The creation time is not returned for files on Linux Compute Nodes."""
-    last_modified: datetime.datetime
-    """The time at which the file was last modified. Required."""
-    content_length: int
-    """The length of the file. Required."""
-    content_type: Optional[str]
-    """The content type of the file."""
-    file_mode: Optional[str]
-    """The file mode attribute in octal format. The file mode is returned only for files on Linux
-     Compute Nodes."""
-
-    def __init__(
-        self,
-        *,
-        url: Optional[str] = None,
-        is_directory: Optional[bool] = None,
-        last_modified: datetime.datetime,
-        content_length: int,
-        creation_time: Optional[datetime.datetime] = None,
-        content_type: Optional[str] = None,
-        file_mode: Optional[str] = None,
-    ) -> None:
-        self.url = url
-        self.is_directory = is_directory
-        self.creation_time = creation_time
-        self.last_modified = last_modified
-        self.content_length = content_length
-        self.content_type = content_type
-        self.file_mode = file_mode
-
 
 def patch_sdk():
     """Do not remove from this file.
diff --git a/sdk/batch/azure-batch/setup.py b/sdk/batch/azure-batch/setup.py
index c8218eb0bece..ec3020b52c84 100644
--- a/sdk/batch/azure-batch/setup.py
+++ b/sdk/batch/azure-batch/setup.py
@@ -5,7 +5,7 @@
 # Code generated by Microsoft (R) Python Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
-# coding: utf-8
+
 
 import os
 import re
@@ -29,7 +29,7 @@
 setup(
     name=PACKAGE_NAME,
     version=version,
-    description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
+    description="Microsoft Corporation {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
     long_description=open("README.md", "r").read(),
     long_description_content_type="text/markdown",
     license="MIT License",
diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py
index 51e33cb2465b..fedd042965d0 100644
--- a/sdk/batch/azure-batch/tests/test_batch.py
+++ b/sdk/batch/azure-batch/tests/test_batch.py
@@ -1,4 +1,4 @@
-# pylint: disable=too-many-lines
+# pylint: disable=too-many-lines,line-too-long,useless-suppression
 # coding: utf-8
 
 # -------------------------------------------------------------------------
@@ -85,7 +85,7 @@ async def assertCreateTasksError(self, code, func, *args, **kwargs):
             pytest.fail("Inner BatchErrorException expected but not exist")
         except Exception as err:
             pytest.fail("Expected CreateTasksError, instead got: {!r}".format(err))
-        
+
     @CachedResourceGroupPreparer(location=AZURE_LOCATION)
     @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT)
     @pytest.mark.parametrize("BatchClient", [SyncBatchClient, AsyncBatchClient], ids=["sync", "async"])
@@ -1184,4 +1184,4 @@ async def test_batch_jobs(self, client: BatchClient, **kwargs):
 
         # Test Delete Job
         response = await wrap_result(client.delete_job(job_auto_param.id))
-        assert response is None
+        assert response is None
\ No newline at end of file
diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml
index 96f30af8578f..baedb3c3e0c4 100644
--- a/sdk/batch/azure-batch/tsp-location.yaml
+++ b/sdk/batch/azure-batch/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/batch/Azure.Batch
-commit: e7ed63002df7e9cc1d3e4cd139d76c4d7040acd3
+commit: 167d2be6a8c000e27109c75f9133218e92c64724
 repo: Azure/azure-rest-api-specs
 additionalDirectories:
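
Reviewer note on the `CreateTasksError` removal in models/_patch.py above: the snippet below is a minimal, self-contained sketch of the message-selection behavior that the removed `__init__` implemented, for readers evaluating what the deletion drops from the public surface. `TaskError` and `TaskResult` are hypothetical stand-ins for the SDK's `~TaskAddResult` payloads, and the real class subclassed `azure.core.exceptions.HttpResponseError`, so treat this as an illustration rather than the SDK implementation.

from dataclasses import dataclass
from typing import List


@dataclass
class TaskError:
    # Hypothetical stand-in for the error payload carried by a failed task result.
    code: str
    message: str


@dataclass
class TaskResult:
    # Hypothetical stand-in for a per-task result from an add-collection call.
    task_id: str
    error: TaskError


def select_message(failure_tasks: List[TaskResult], errors: List[Exception]) -> str:
    # Mirrors the branch ordering of the removed CreateTasksError.__init__:
    # combined failures first, then unknown errors, then client-side task failures.
    if failure_tasks and errors:
        return (
            "Multiple errors encountered. Check the `failure_tasks` and "
            "`errors` properties for additional details."
        )
    if errors:
        if len(errors) > 1:
            return (
                "Multiple errors occurred when submitting add_collection "
                "requests. Check the `errors` property for the inner exceptions."
            )
        return str(errors[0])
    if failure_tasks:
        if len(failure_tasks) > 1:
            return (
                "Multiple client side errors occurred when adding the "
                "tasks. Check the `failure_tasks` property for details on these tasks."
            )
        result = failure_tasks[0]
        return "Task with id `%s` failed due to client error - %s::%s" % (
            result.task_id,
            result.error.code,
            result.error.message,
        )
    return ""  # defensive default; the removed helper assumed at least one failure


if __name__ == "__main__":
    failed = [TaskResult("task-1", TaskError("InvalidTask", "bad command line"))]
    print(select_message(failed, []))
    # -> Task with id `task-1` failed due to client error - InvalidTask::bad command line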