diff --git a/sdk/batch/azure-batch/MANIFEST.in b/sdk/batch/azure-batch/MANIFEST.in index cb1e2b1128cb..06208ba9fde2 100644 --- a/sdk/batch/azure-batch/MANIFEST.in +++ b/sdk/batch/azure-batch/MANIFEST.in @@ -3,4 +3,4 @@ include LICENSE include azure/batch/py.typed recursive-include tests *.py recursive-include samples *.py *.md -include azure/__init__.py \ No newline at end of file +include azure/__init__.py diff --git a/sdk/batch/azure-batch/_meta.json b/sdk/batch/azure-batch/_meta.json new file mode 100644 index 000000000000..97a2d9fce6f9 --- /dev/null +++ b/sdk/batch/azure-batch/_meta.json @@ -0,0 +1,6 @@ +{ + "commit": "6a439da0fa799a27aef35d4fac4a29455cc86fec", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/batch/Azure.Batch", + "@azure-tools/typespec-python": "0.44.2" +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/apiview-properties.json b/sdk/batch/azure-batch/apiview-properties.json new file mode 100644 index 000000000000..4662a9ba9c23 --- /dev/null +++ b/sdk/batch/azure-batch/apiview-properties.json @@ -0,0 +1,363 @@ +{ + "CrossLanguagePackageId": "Azure.Batch", + "CrossLanguageDefinitionId": { + "azure.batch.models.AffinityInfo": "Azure.Batch.AffinityInfo", + "azure.batch.models.AuthenticationTokenSettings": "Azure.Batch.AuthenticationTokenSettings", + "azure.batch.models.AutomaticOsUpgradePolicy": "Azure.Batch.AutomaticOsUpgradePolicy", + "azure.batch.models.AutoScaleRun": "Azure.Batch.AutoScaleRun", + "azure.batch.models.AutoScaleRunError": "Azure.Batch.AutoScaleRunError", + "azure.batch.models.AutoUserSpecification": "Azure.Batch.AutoUserSpecification", + "azure.batch.models.AzureBlobFileSystemConfiguration": "Azure.Batch.AzureBlobFileSystemConfiguration", + "azure.batch.models.AzureFileShareConfiguration": "Azure.Batch.AzureFileShareConfiguration", + "azure.batch.models.BatchApplication": "Azure.Batch.BatchApplication", + "azure.batch.models.BatchApplicationPackageReference": "Azure.Batch.BatchApplicationPackageReference", + "azure.batch.models.BatchAutoPoolSpecification": "Azure.Batch.BatchAutoPoolSpecification", + "azure.batch.models.BatchCertificate": "Azure.Batch.BatchCertificate", + "azure.batch.models.BatchCertificateReference": "Azure.Batch.BatchCertificateReference", + "azure.batch.models.BatchError": "Azure.Batch.BatchError", + "azure.batch.models.BatchErrorDetail": "Azure.Batch.BatchErrorDetail", + "azure.batch.models.BatchErrorMessage": "Azure.Batch.BatchErrorMessage", + "azure.batch.models.BatchJob": "Azure.Batch.BatchJob", + "azure.batch.models.BatchJobConstraints": "Azure.Batch.BatchJobConstraints", + "azure.batch.models.BatchJobCreateContent": "Azure.Batch.BatchJobCreateContent", + "azure.batch.models.BatchJobDisableContent": "Azure.Batch.BatchJobDisableContent", + "azure.batch.models.BatchJobExecutionInfo": "Azure.Batch.BatchJobExecutionInfo", + "azure.batch.models.BatchJobManagerTask": "Azure.Batch.BatchJobManagerTask", + "azure.batch.models.BatchJobNetworkConfiguration": "Azure.Batch.BatchJobNetworkConfiguration", + "azure.batch.models.BatchJobPreparationAndReleaseTaskStatus": "Azure.Batch.BatchJobPreparationAndReleaseTaskStatus", + "azure.batch.models.BatchJobPreparationTask": "Azure.Batch.BatchJobPreparationTask", + "azure.batch.models.BatchJobPreparationTaskExecutionInfo": "Azure.Batch.BatchJobPreparationTaskExecutionInfo", + "azure.batch.models.BatchJobReleaseTask": "Azure.Batch.BatchJobReleaseTask", + "azure.batch.models.BatchJobReleaseTaskExecutionInfo": 
"Azure.Batch.BatchJobReleaseTaskExecutionInfo", + "azure.batch.models.BatchJobSchedule": "Azure.Batch.BatchJobSchedule", + "azure.batch.models.BatchJobScheduleConfiguration": "Azure.Batch.BatchJobScheduleConfiguration", + "azure.batch.models.BatchJobScheduleCreateContent": "Azure.Batch.BatchJobScheduleCreateContent", + "azure.batch.models.BatchJobScheduleExecutionInfo": "Azure.Batch.BatchJobScheduleExecutionInfo", + "azure.batch.models.BatchJobScheduleStatistics": "Azure.Batch.BatchJobScheduleStatistics", + "azure.batch.models.BatchJobScheduleUpdateContent": "Azure.Batch.BatchJobScheduleUpdateContent", + "azure.batch.models.BatchJobSchedulingError": "Azure.Batch.BatchJobSchedulingError", + "azure.batch.models.BatchJobSpecification": "Azure.Batch.BatchJobSpecification", + "azure.batch.models.BatchJobStatistics": "Azure.Batch.BatchJobStatistics", + "azure.batch.models.BatchJobTerminateContent": "Azure.Batch.BatchJobTerminateContent", + "azure.batch.models.BatchJobUpdateContent": "Azure.Batch.BatchJobUpdateContent", + "azure.batch.models.BatchNode": "Azure.Batch.BatchNode", + "azure.batch.models.BatchNodeAgentInfo": "Azure.Batch.BatchNodeAgentInfo", + "azure.batch.models.BatchNodeCounts": "Azure.Batch.BatchNodeCounts", + "azure.batch.models.BatchNodeDeallocateContent": "Azure.Batch.BatchNodeDeallocateContent", + "azure.batch.models.BatchNodeDisableSchedulingContent": "Azure.Batch.BatchNodeDisableSchedulingContent", + "azure.batch.models.BatchNodeEndpointConfiguration": "Azure.Batch.BatchNodeEndpointConfiguration", + "azure.batch.models.BatchNodeError": "Azure.Batch.BatchNodeError", + "azure.batch.models.BatchNodeFile": "Azure.Batch.BatchNodeFile", + "azure.batch.models.BatchNodeIdentityReference": "Azure.Batch.BatchNodeIdentityReference", + "azure.batch.models.BatchNodeInfo": "Azure.Batch.BatchNodeInfo", + "azure.batch.models.BatchNodePlacementConfiguration": "Azure.Batch.BatchNodePlacementConfiguration", + "azure.batch.models.BatchNodeRebootContent": "Azure.Batch.BatchNodeRebootContent", + "azure.batch.models.BatchNodeReimageContent": "Azure.Batch.BatchNodeReimageContent", + "azure.batch.models.BatchNodeRemoteLoginSettings": "Azure.Batch.BatchNodeRemoteLoginSettings", + "azure.batch.models.BatchNodeRemoveContent": "Azure.Batch.BatchNodeRemoveContent", + "azure.batch.models.BatchNodeUserCreateContent": "Azure.Batch.BatchNodeUserCreateContent", + "azure.batch.models.BatchNodeUserUpdateContent": "Azure.Batch.BatchNodeUserUpdateContent", + "azure.batch.models.BatchNodeVMExtension": "Azure.Batch.BatchNodeVMExtension", + "azure.batch.models.BatchPool": "Azure.Batch.BatchPool", + "azure.batch.models.BatchPoolCreateContent": "Azure.Batch.BatchPoolCreateContent", + "azure.batch.models.BatchPoolEnableAutoScaleContent": "Azure.Batch.BatchPoolEnableAutoScaleContent", + "azure.batch.models.BatchPoolEndpointConfiguration": "Azure.Batch.BatchPoolEndpointConfiguration", + "azure.batch.models.BatchPoolEvaluateAutoScaleContent": "Azure.Batch.BatchPoolEvaluateAutoScaleContent", + "azure.batch.models.BatchPoolIdentity": "Azure.Batch.BatchPoolIdentity", + "azure.batch.models.BatchPoolInfo": "Azure.Batch.BatchPoolInfo", + "azure.batch.models.BatchPoolNodeCounts": "Azure.Batch.BatchPoolNodeCounts", + "azure.batch.models.BatchPoolReplaceContent": "Azure.Batch.BatchPoolReplaceContent", + "azure.batch.models.BatchPoolResizeContent": "Azure.Batch.BatchPoolResizeContent", + "azure.batch.models.BatchPoolResourceStatistics": "Azure.Batch.BatchPoolResourceStatistics", + "azure.batch.models.BatchPoolSpecification": 
"Azure.Batch.BatchPoolSpecification", + "azure.batch.models.BatchPoolStatistics": "Azure.Batch.BatchPoolStatistics", + "azure.batch.models.BatchPoolUpdateContent": "Azure.Batch.BatchPoolUpdateContent", + "azure.batch.models.BatchPoolUsageMetrics": "Azure.Batch.BatchPoolUsageMetrics", + "azure.batch.models.BatchPoolUsageStatistics": "Azure.Batch.BatchPoolUsageStatistics", + "azure.batch.models.BatchStartTask": "Azure.Batch.BatchStartTask", + "azure.batch.models.BatchStartTaskInfo": "Azure.Batch.BatchStartTaskInfo", + "azure.batch.models.BatchSubtask": "Azure.Batch.BatchSubtask", + "azure.batch.models.BatchSupportedImage": "Azure.Batch.BatchSupportedImage", + "azure.batch.models.BatchTask": "Azure.Batch.BatchTask", + "azure.batch.models.BatchTaskAddCollectionResult": "Azure.Batch.BatchTaskAddCollectionResult", + "azure.batch.models.BatchTaskAddResult": "Azure.Batch.BatchTaskAddResult", + "azure.batch.models.BatchTaskConstraints": "Azure.Batch.BatchTaskConstraints", + "azure.batch.models.BatchTaskContainerExecutionInfo": "Azure.Batch.BatchTaskContainerExecutionInfo", + "azure.batch.models.BatchTaskContainerSettings": "Azure.Batch.BatchTaskContainerSettings", + "azure.batch.models.BatchTaskCounts": "Azure.Batch.BatchTaskCounts", + "azure.batch.models.BatchTaskCountsResult": "Azure.Batch.BatchTaskCountsResult", + "azure.batch.models.BatchTaskCreateContent": "Azure.Batch.BatchTaskCreateContent", + "azure.batch.models.BatchTaskDependencies": "Azure.Batch.BatchTaskDependencies", + "azure.batch.models.BatchTaskExecutionInfo": "Azure.Batch.BatchTaskExecutionInfo", + "azure.batch.models.BatchTaskFailureInfo": "Azure.Batch.BatchTaskFailureInfo", + "azure.batch.models.BatchTaskGroup": "Azure.Batch.BatchTaskGroup", + "azure.batch.models.BatchTaskIdRange": "Azure.Batch.BatchTaskIdRange", + "azure.batch.models.BatchTaskInfo": "Azure.Batch.BatchTaskInfo", + "azure.batch.models.BatchTaskSchedulingPolicy": "Azure.Batch.BatchTaskSchedulingPolicy", + "azure.batch.models.BatchTaskSlotCounts": "Azure.Batch.BatchTaskSlotCounts", + "azure.batch.models.BatchTaskStatistics": "Azure.Batch.BatchTaskStatistics", + "azure.batch.models.CifsMountConfiguration": "Azure.Batch.CifsMountConfiguration", + "azure.batch.models.ContainerConfiguration": "Azure.Batch.ContainerConfiguration", + "azure.batch.models.ContainerHostBatchBindMountEntry": "Azure.Batch.ContainerHostBatchBindMountEntry", + "azure.batch.models.ContainerRegistryReference": "Azure.Batch.ContainerRegistryReference", + "azure.batch.models.DataDisk": "Azure.Batch.DataDisk", + "azure.batch.models.DeleteBatchCertificateError": "Azure.Batch.DeleteBatchCertificateError", + "azure.batch.models.DiffDiskSettings": "Azure.Batch.DiffDiskSettings", + "azure.batch.models.DiskEncryptionConfiguration": "Azure.Batch.DiskEncryptionConfiguration", + "azure.batch.models.EnvironmentSetting": "Azure.Batch.EnvironmentSetting", + "azure.batch.models.ExitCodeMapping": "Azure.Batch.ExitCodeMapping", + "azure.batch.models.ExitCodeRangeMapping": "Azure.Batch.ExitCodeRangeMapping", + "azure.batch.models.ExitConditions": "Azure.Batch.ExitConditions", + "azure.batch.models.ExitOptions": "Azure.Batch.ExitOptions", + "azure.batch.models.FileProperties": "Azure.Batch.FileProperties", + "azure.batch.models.HttpHeader": "Azure.Batch.HttpHeader", + "azure.batch.models.ImageReference": "Azure.Batch.ImageReference", + "azure.batch.models.InboundEndpoint": "Azure.Batch.InboundEndpoint", + "azure.batch.models.InboundNatPool": "Azure.Batch.InboundNatPool", + "azure.batch.models.InstanceViewStatus": 
"Azure.Batch.InstanceViewStatus", + "azure.batch.models.LinuxUserConfiguration": "Azure.Batch.LinuxUserConfiguration", + "azure.batch.models.ManagedDisk": "Azure.Batch.ManagedDisk", + "azure.batch.models.MetadataItem": "Azure.Batch.MetadataItem", + "azure.batch.models.MountConfiguration": "Azure.Batch.MountConfiguration", + "azure.batch.models.MultiInstanceSettings": "Azure.Batch.MultiInstanceSettings", + "azure.batch.models.NameValuePair": "Azure.Batch.NameValuePair", + "azure.batch.models.NetworkConfiguration": "Azure.Batch.NetworkConfiguration", + "azure.batch.models.NetworkSecurityGroupRule": "Azure.Batch.NetworkSecurityGroupRule", + "azure.batch.models.NfsMountConfiguration": "Azure.Batch.NfsMountConfiguration", + "azure.batch.models.OSDisk": "Azure.Batch.OSDisk", + "azure.batch.models.OutputFile": "Azure.Batch.OutputFile", + "azure.batch.models.OutputFileBlobContainerDestination": "Azure.Batch.OutputFileBlobContainerDestination", + "azure.batch.models.OutputFileDestination": "Azure.Batch.OutputFileDestination", + "azure.batch.models.OutputFileUploadConfig": "Azure.Batch.OutputFileUploadConfig", + "azure.batch.models.PublicIpAddressConfiguration": "Azure.Batch.PublicIpAddressConfiguration", + "azure.batch.models.RecentBatchJob": "Azure.Batch.RecentBatchJob", + "azure.batch.models.ResizeError": "Azure.Batch.ResizeError", + "azure.batch.models.ResourceFile": "Azure.Batch.ResourceFile", + "azure.batch.models.RollingUpgradePolicy": "Azure.Batch.RollingUpgradePolicy", + "azure.batch.models.SecurityProfile": "Azure.Batch.SecurityProfile", + "azure.batch.models.ServiceArtifactReference": "Azure.Batch.ServiceArtifactReference", + "azure.batch.models.UefiSettings": "Azure.Batch.UefiSettings", + "azure.batch.models.UpgradePolicy": "Azure.Batch.UpgradePolicy", + "azure.batch.models.UploadBatchServiceLogsContent": "Azure.Batch.UploadBatchServiceLogsContent", + "azure.batch.models.UploadBatchServiceLogsResult": "Azure.Batch.UploadBatchServiceLogsResult", + "azure.batch.models.UserAccount": "Azure.Batch.UserAccount", + "azure.batch.models.UserAssignedIdentity": "Azure.Batch.UserAssignedIdentity", + "azure.batch.models.UserIdentity": "Azure.Batch.UserIdentity", + "azure.batch.models.VirtualMachineConfiguration": "Azure.Batch.VirtualMachineConfiguration", + "azure.batch.models.VirtualMachineInfo": "Azure.Batch.VirtualMachineInfo", + "azure.batch.models.VMDiskSecurityProfile": "Azure.Batch.VMDiskSecurityProfile", + "azure.batch.models.VMExtension": "Azure.Batch.VMExtension", + "azure.batch.models.VMExtensionInstanceView": "Azure.Batch.VMExtensionInstanceView", + "azure.batch.models.WindowsConfiguration": "Azure.Batch.WindowsConfiguration", + "azure.batch.models.WindowsUserConfiguration": "Azure.Batch.WindowsUserConfiguration", + "azure.batch.models.CachingType": "Azure.Batch.CachingType", + "azure.batch.models.StorageAccountType": "Azure.Batch.StorageAccountType", + "azure.batch.models.ContainerType": "Azure.Batch.ContainerType", + "azure.batch.models.DiskEncryptionTarget": "Azure.Batch.DiskEncryptionTarget", + "azure.batch.models.BatchNodePlacementPolicyType": "Azure.Batch.BatchNodePlacementPolicyType", + "azure.batch.models.DiffDiskPlacement": "Azure.Batch.DiffDiskPlacement", + "azure.batch.models.SecurityEncryptionTypes": "Azure.Batch.SecurityEncryptionTypes", + "azure.batch.models.SecurityTypes": "Azure.Batch.SecurityTypes", + "azure.batch.models.DynamicVNetAssignmentScope": "Azure.Batch.DynamicVNetAssignmentScope", + "azure.batch.models.InboundEndpointProtocol": 
"Azure.Batch.InboundEndpointProtocol", + "azure.batch.models.NetworkSecurityGroupRuleAccess": "Azure.Batch.NetworkSecurityGroupRuleAccess", + "azure.batch.models.IpAddressProvisioningType": "Azure.Batch.IpAddressProvisioningType", + "azure.batch.models.ContainerWorkingDirectory": "Azure.Batch.ContainerWorkingDirectory", + "azure.batch.models.ContainerHostDataPath": "Azure.Batch.ContainerHostDataPath", + "azure.batch.models.AutoUserScope": "Azure.Batch.AutoUserScope", + "azure.batch.models.ElevationLevel": "Azure.Batch.ElevationLevel", + "azure.batch.models.BatchCertificateStoreLocation": "Azure.Batch.BatchCertificateStoreLocation", + "azure.batch.models.BatchCertificateVisibility": "Azure.Batch.BatchCertificateVisibility", + "azure.batch.models.BatchNodeFillType": "Azure.Batch.BatchNodeFillType", + "azure.batch.models.LoginMode": "Azure.Batch.LoginMode", + "azure.batch.models.BatchNodeCommunicationMode": "Azure.Batch.BatchNodeCommunicationMode", + "azure.batch.models.UpgradeMode": "Azure.Batch.UpgradeMode", + "azure.batch.models.BatchPoolState": "Azure.Batch.BatchPoolState", + "azure.batch.models.AllocationState": "Azure.Batch.AllocationState", + "azure.batch.models.BatchPoolIdentityType": "Azure.Batch.BatchPoolIdentityType", + "azure.batch.models.BatchNodeDeallocationOption": "Azure.Batch.BatchNodeDeallocationOption", + "azure.batch.models.OSType": "Azure.Batch.OSType", + "azure.batch.models.ImageVerificationType": "Azure.Batch.ImageVerificationType", + "azure.batch.models.BatchJobState": "Azure.Batch.BatchJobState", + "azure.batch.models.OutputFileUploadCondition": "Azure.Batch.OutputFileUploadCondition", + "azure.batch.models.AccessScope": "Azure.Batch.AccessScope", + "azure.batch.models.BatchPoolLifetimeOption": "Azure.Batch.BatchPoolLifetimeOption", + "azure.batch.models.OnAllBatchTasksComplete": "Azure.Batch.OnAllBatchTasksComplete", + "azure.batch.models.OnBatchTaskFailure": "Azure.Batch.OnBatchTaskFailure", + "azure.batch.models.ErrorCategory": "Azure.Batch.ErrorCategory", + "azure.batch.models.DisableBatchJobOption": "Azure.Batch.DisableBatchJobOption", + "azure.batch.models.BatchJobPreparationTaskState": "Azure.Batch.BatchJobPreparationTaskState", + "azure.batch.models.BatchTaskExecutionResult": "Azure.Batch.BatchTaskExecutionResult", + "azure.batch.models.BatchJobReleaseTaskState": "Azure.Batch.BatchJobReleaseTaskState", + "azure.batch.models.BatchCertificateState": "Azure.Batch.BatchCertificateState", + "azure.batch.models.BatchCertificateFormat": "Azure.Batch.BatchCertificateFormat", + "azure.batch.models.BatchJobScheduleState": "Azure.Batch.BatchJobScheduleState", + "azure.batch.models.BatchJobAction": "Azure.Batch.BatchJobAction", + "azure.batch.models.DependencyAction": "Azure.Batch.DependencyAction", + "azure.batch.models.BatchTaskState": "Azure.Batch.BatchTaskState", + "azure.batch.models.BatchTaskAddStatus": "Azure.Batch.BatchTaskAddStatus", + "azure.batch.models.BatchSubtaskState": "Azure.Batch.BatchSubtaskState", + "azure.batch.models.BatchNodeState": "Azure.Batch.BatchNodeState", + "azure.batch.models.SchedulingState": "Azure.Batch.SchedulingState", + "azure.batch.models.BatchStartTaskState": "Azure.Batch.BatchStartTaskState", + "azure.batch.models.BatchNodeRebootOption": "Azure.Batch.BatchNodeRebootOption", + "azure.batch.models.BatchNodeReimageOption": "Azure.Batch.BatchNodeReimageOption", + "azure.batch.models.BatchNodeDeallocateOption": "Azure.Batch.BatchNodeDeallocateOption", + "azure.batch.models.BatchNodeDisableSchedulingOption": 
"Azure.Batch.BatchNodeDisableSchedulingOption", + "azure.batch.models.StatusLevelTypes": "Azure.Batch.StatusLevelTypes", + "azure.batch.BatchClient.list_applications": "Client.BatchClient.listApplications", + "azure.batch.aio.BatchClient.list_applications": "Client.BatchClient.listApplications", + "azure.batch.BatchClient.get_application": "Client.BatchClient.getApplication", + "azure.batch.aio.BatchClient.get_application": "Client.BatchClient.getApplication", + "azure.batch.BatchClient.list_pool_usage_metrics": "Client.BatchClient.listPoolUsageMetrics", + "azure.batch.aio.BatchClient.list_pool_usage_metrics": "Client.BatchClient.listPoolUsageMetrics", + "azure.batch.BatchClient.create_pool": "Client.BatchClient.createPool", + "azure.batch.aio.BatchClient.create_pool": "Client.BatchClient.createPool", + "azure.batch.BatchClient.list_pools": "Client.BatchClient.listPools", + "azure.batch.aio.BatchClient.list_pools": "Client.BatchClient.listPools", + "azure.batch.BatchClient.delete_pool": "Client.BatchClient.deletePool", + "azure.batch.aio.BatchClient.delete_pool": "Client.BatchClient.deletePool", + "azure.batch.BatchClient.pool_exists": "Client.BatchClient.poolExists", + "azure.batch.aio.BatchClient.pool_exists": "Client.BatchClient.poolExists", + "azure.batch.BatchClient.get_pool": "Client.BatchClient.getPool", + "azure.batch.aio.BatchClient.get_pool": "Client.BatchClient.getPool", + "azure.batch.BatchClient.update_pool": "Client.BatchClient.updatePool", + "azure.batch.aio.BatchClient.update_pool": "Client.BatchClient.updatePool", + "azure.batch.BatchClient.disable_pool_auto_scale": "Client.BatchClient.disablePoolAutoScale", + "azure.batch.aio.BatchClient.disable_pool_auto_scale": "Client.BatchClient.disablePoolAutoScale", + "azure.batch.BatchClient.enable_pool_auto_scale": "Client.BatchClient.enablePoolAutoScale", + "azure.batch.aio.BatchClient.enable_pool_auto_scale": "Client.BatchClient.enablePoolAutoScale", + "azure.batch.BatchClient.evaluate_pool_auto_scale": "Client.BatchClient.evaluatePoolAutoScale", + "azure.batch.aio.BatchClient.evaluate_pool_auto_scale": "Client.BatchClient.evaluatePoolAutoScale", + "azure.batch.BatchClient.resize_pool": "Client.BatchClient.resizePool", + "azure.batch.aio.BatchClient.resize_pool": "Client.BatchClient.resizePool", + "azure.batch.BatchClient.stop_pool_resize": "Client.BatchClient.stopPoolResize", + "azure.batch.aio.BatchClient.stop_pool_resize": "Client.BatchClient.stopPoolResize", + "azure.batch.BatchClient.replace_pool_properties": "Client.BatchClient.replacePoolProperties", + "azure.batch.aio.BatchClient.replace_pool_properties": "Client.BatchClient.replacePoolProperties", + "azure.batch.BatchClient.remove_nodes": "Client.BatchClient.removeNodes", + "azure.batch.aio.BatchClient.remove_nodes": "Client.BatchClient.removeNodes", + "azure.batch.BatchClient.list_supported_images": "Client.BatchClient.listSupportedImages", + "azure.batch.aio.BatchClient.list_supported_images": "Client.BatchClient.listSupportedImages", + "azure.batch.BatchClient.list_pool_node_counts": "Client.BatchClient.listPoolNodeCounts", + "azure.batch.aio.BatchClient.list_pool_node_counts": "Client.BatchClient.listPoolNodeCounts", + "azure.batch.BatchClient.delete_job": "Client.BatchClient.deleteJob", + "azure.batch.aio.BatchClient.delete_job": "Client.BatchClient.deleteJob", + "azure.batch.BatchClient.get_job": "Client.BatchClient.getJob", + "azure.batch.aio.BatchClient.get_job": "Client.BatchClient.getJob", + "azure.batch.BatchClient.update_job": "Client.BatchClient.updateJob", 
+ "azure.batch.aio.BatchClient.update_job": "Client.BatchClient.updateJob", + "azure.batch.BatchClient.replace_job": "Client.BatchClient.replaceJob", + "azure.batch.aio.BatchClient.replace_job": "Client.BatchClient.replaceJob", + "azure.batch.BatchClient.disable_job": "Client.BatchClient.disableJob", + "azure.batch.aio.BatchClient.disable_job": "Client.BatchClient.disableJob", + "azure.batch.BatchClient.enable_job": "Client.BatchClient.enableJob", + "azure.batch.aio.BatchClient.enable_job": "Client.BatchClient.enableJob", + "azure.batch.BatchClient.terminate_job": "Client.BatchClient.terminateJob", + "azure.batch.aio.BatchClient.terminate_job": "Client.BatchClient.terminateJob", + "azure.batch.BatchClient.create_job": "Client.BatchClient.createJob", + "azure.batch.aio.BatchClient.create_job": "Client.BatchClient.createJob", + "azure.batch.BatchClient.list_jobs": "Client.BatchClient.listJobs", + "azure.batch.aio.BatchClient.list_jobs": "Client.BatchClient.listJobs", + "azure.batch.BatchClient.list_jobs_from_schedule": "Client.BatchClient.listJobsFromSchedule", + "azure.batch.aio.BatchClient.list_jobs_from_schedule": "Client.BatchClient.listJobsFromSchedule", + "azure.batch.BatchClient.list_job_preparation_and_release_task_status": "Client.BatchClient.listJobPreparationAndReleaseTaskStatus", + "azure.batch.aio.BatchClient.list_job_preparation_and_release_task_status": "Client.BatchClient.listJobPreparationAndReleaseTaskStatus", + "azure.batch.BatchClient.get_job_task_counts": "Client.BatchClient.getJobTaskCounts", + "azure.batch.aio.BatchClient.get_job_task_counts": "Client.BatchClient.getJobTaskCounts", + "azure.batch.BatchClient.create_certificate": "Client.BatchClient.createCertificate", + "azure.batch.aio.BatchClient.create_certificate": "Client.BatchClient.createCertificate", + "azure.batch.BatchClient.list_certificates": "Client.BatchClient.listCertificates", + "azure.batch.aio.BatchClient.list_certificates": "Client.BatchClient.listCertificates", + "azure.batch.BatchClient.cancel_certificate_deletion": "Client.BatchClient.cancelCertificateDeletion", + "azure.batch.aio.BatchClient.cancel_certificate_deletion": "Client.BatchClient.cancelCertificateDeletion", + "azure.batch.BatchClient.delete_certificate": "Client.BatchClient.deleteCertificate", + "azure.batch.aio.BatchClient.delete_certificate": "Client.BatchClient.deleteCertificate", + "azure.batch.BatchClient.get_certificate": "Client.BatchClient.getCertificate", + "azure.batch.aio.BatchClient.get_certificate": "Client.BatchClient.getCertificate", + "azure.batch.BatchClient.job_schedule_exists": "Client.BatchClient.jobScheduleExists", + "azure.batch.aio.BatchClient.job_schedule_exists": "Client.BatchClient.jobScheduleExists", + "azure.batch.BatchClient.delete_job_schedule": "Client.BatchClient.deleteJobSchedule", + "azure.batch.aio.BatchClient.delete_job_schedule": "Client.BatchClient.deleteJobSchedule", + "azure.batch.BatchClient.get_job_schedule": "Client.BatchClient.getJobSchedule", + "azure.batch.aio.BatchClient.get_job_schedule": "Client.BatchClient.getJobSchedule", + "azure.batch.BatchClient.update_job_schedule": "Client.BatchClient.updateJobSchedule", + "azure.batch.aio.BatchClient.update_job_schedule": "Client.BatchClient.updateJobSchedule", + "azure.batch.BatchClient.replace_job_schedule": "Client.BatchClient.replaceJobSchedule", + "azure.batch.aio.BatchClient.replace_job_schedule": "Client.BatchClient.replaceJobSchedule", + "azure.batch.BatchClient.disable_job_schedule": "Client.BatchClient.disableJobSchedule", + 
"azure.batch.aio.BatchClient.disable_job_schedule": "Client.BatchClient.disableJobSchedule", + "azure.batch.BatchClient.enable_job_schedule": "Client.BatchClient.enableJobSchedule", + "azure.batch.aio.BatchClient.enable_job_schedule": "Client.BatchClient.enableJobSchedule", + "azure.batch.BatchClient.terminate_job_schedule": "Client.BatchClient.terminateJobSchedule", + "azure.batch.aio.BatchClient.terminate_job_schedule": "Client.BatchClient.terminateJobSchedule", + "azure.batch.BatchClient.create_job_schedule": "Client.BatchClient.createJobSchedule", + "azure.batch.aio.BatchClient.create_job_schedule": "Client.BatchClient.createJobSchedule", + "azure.batch.BatchClient.list_job_schedules": "Client.BatchClient.listJobSchedules", + "azure.batch.aio.BatchClient.list_job_schedules": "Client.BatchClient.listJobSchedules", + "azure.batch.BatchClient.create_task": "Client.BatchClient.createTask", + "azure.batch.aio.BatchClient.create_task": "Client.BatchClient.createTask", + "azure.batch.BatchClient.list_tasks": "Client.BatchClient.listTasks", + "azure.batch.aio.BatchClient.list_tasks": "Client.BatchClient.listTasks", + "azure.batch.BatchClient.create_task_collection": "Client.BatchClient.createTaskCollection", + "azure.batch.aio.BatchClient.create_task_collection": "Client.BatchClient.createTaskCollection", + "azure.batch.BatchClient.delete_task": "Client.BatchClient.deleteTask", + "azure.batch.aio.BatchClient.delete_task": "Client.BatchClient.deleteTask", + "azure.batch.BatchClient.get_task": "Client.BatchClient.getTask", + "azure.batch.aio.BatchClient.get_task": "Client.BatchClient.getTask", + "azure.batch.BatchClient.replace_task": "Client.BatchClient.replaceTask", + "azure.batch.aio.BatchClient.replace_task": "Client.BatchClient.replaceTask", + "azure.batch.BatchClient.list_sub_tasks": "Client.BatchClient.listSubTasks", + "azure.batch.aio.BatchClient.list_sub_tasks": "Client.BatchClient.listSubTasks", + "azure.batch.BatchClient.terminate_task": "Client.BatchClient.terminateTask", + "azure.batch.aio.BatchClient.terminate_task": "Client.BatchClient.terminateTask", + "azure.batch.BatchClient.reactivate_task": "Client.BatchClient.reactivateTask", + "azure.batch.aio.BatchClient.reactivate_task": "Client.BatchClient.reactivateTask", + "azure.batch.BatchClient.delete_task_file": "Client.BatchClient.deleteTaskFile", + "azure.batch.aio.BatchClient.delete_task_file": "Client.BatchClient.deleteTaskFile", + "azure.batch.BatchClient.get_task_file": "Client.BatchClient.getTaskFile", + "azure.batch.aio.BatchClient.get_task_file": "Client.BatchClient.getTaskFile", + "azure.batch.BatchClient.get_task_file_properties": "Client.BatchClient.getTaskFileProperties", + "azure.batch.aio.BatchClient.get_task_file_properties": "Client.BatchClient.getTaskFileProperties", + "azure.batch.BatchClient.list_task_files": "Client.BatchClient.listTaskFiles", + "azure.batch.aio.BatchClient.list_task_files": "Client.BatchClient.listTaskFiles", + "azure.batch.BatchClient.create_node_user": "Client.BatchClient.createNodeUser", + "azure.batch.aio.BatchClient.create_node_user": "Client.BatchClient.createNodeUser", + "azure.batch.BatchClient.delete_node_user": "Client.BatchClient.deleteNodeUser", + "azure.batch.aio.BatchClient.delete_node_user": "Client.BatchClient.deleteNodeUser", + "azure.batch.BatchClient.replace_node_user": "Client.BatchClient.replaceNodeUser", + "azure.batch.aio.BatchClient.replace_node_user": "Client.BatchClient.replaceNodeUser", + "azure.batch.BatchClient.get_node": "Client.BatchClient.getNode", + 
"azure.batch.aio.BatchClient.get_node": "Client.BatchClient.getNode", + "azure.batch.BatchClient.reboot_node": "Client.BatchClient.rebootNode", + "azure.batch.aio.BatchClient.reboot_node": "Client.BatchClient.rebootNode", + "azure.batch.BatchClient.start_node": "Client.BatchClient.startNode", + "azure.batch.aio.BatchClient.start_node": "Client.BatchClient.startNode", + "azure.batch.BatchClient.reimage_node": "Client.BatchClient.reimageNode", + "azure.batch.aio.BatchClient.reimage_node": "Client.BatchClient.reimageNode", + "azure.batch.BatchClient.deallocate_node": "Client.BatchClient.deallocateNode", + "azure.batch.aio.BatchClient.deallocate_node": "Client.BatchClient.deallocateNode", + "azure.batch.BatchClient.disable_node_scheduling": "Client.BatchClient.disableNodeScheduling", + "azure.batch.aio.BatchClient.disable_node_scheduling": "Client.BatchClient.disableNodeScheduling", + "azure.batch.BatchClient.enable_node_scheduling": "Client.BatchClient.enableNodeScheduling", + "azure.batch.aio.BatchClient.enable_node_scheduling": "Client.BatchClient.enableNodeScheduling", + "azure.batch.BatchClient.get_node_remote_login_settings": "Client.BatchClient.getNodeRemoteLoginSettings", + "azure.batch.aio.BatchClient.get_node_remote_login_settings": "Client.BatchClient.getNodeRemoteLoginSettings", + "azure.batch.BatchClient.upload_node_logs": "Client.BatchClient.uploadNodeLogs", + "azure.batch.aio.BatchClient.upload_node_logs": "Client.BatchClient.uploadNodeLogs", + "azure.batch.BatchClient.list_nodes": "Client.BatchClient.listNodes", + "azure.batch.aio.BatchClient.list_nodes": "Client.BatchClient.listNodes", + "azure.batch.BatchClient.get_node_extension": "Client.BatchClient.getNodeExtension", + "azure.batch.aio.BatchClient.get_node_extension": "Client.BatchClient.getNodeExtension", + "azure.batch.BatchClient.list_node_extensions": "Client.BatchClient.listNodeExtensions", + "azure.batch.aio.BatchClient.list_node_extensions": "Client.BatchClient.listNodeExtensions", + "azure.batch.BatchClient.delete_node_file": "Client.BatchClient.deleteNodeFile", + "azure.batch.aio.BatchClient.delete_node_file": "Client.BatchClient.deleteNodeFile", + "azure.batch.BatchClient.get_node_file": "Client.BatchClient.getNodeFile", + "azure.batch.aio.BatchClient.get_node_file": "Client.BatchClient.getNodeFile", + "azure.batch.BatchClient.get_node_file_properties": "Client.BatchClient.getNodeFileProperties", + "azure.batch.aio.BatchClient.get_node_file_properties": "Client.BatchClient.getNodeFileProperties", + "azure.batch.BatchClient.list_node_files": "Client.BatchClient.listNodeFiles", + "azure.batch.aio.BatchClient.list_node_files": "Client.BatchClient.listNodeFiles" + } +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/azure/batch/_client.py b/sdk/batch/azure-batch/azure/batch/_client.py index 4ce5c282dbf4..3657c6c6ed45 100644 --- a/sdk/batch/azure-batch/azure/batch/_client.py +++ b/sdk/batch/azure-batch/azure/batch/_client.py @@ -16,7 +16,7 @@ from ._configuration import BatchClientConfiguration from ._operations import BatchClientOperationsMixin -from ._serialization import Deserializer, Serializer +from ._utils.serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential @@ -39,6 +39,7 @@ class BatchClient(BatchClientOperationsMixin): def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: _endpoint = "{endpoint}" self._config = BatchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + 
kwargs["request_id_header_name"] = "client-request-id" _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py index 14c180e935e8..f5ea8a53fb01 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py @@ -6,13 +6,13 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime import json -import sys from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, TypeVar import urllib.parse -from azure.core import MatchConditions +from azure.core import MatchConditions, PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,14 +31,11 @@ from azure.core.utils import case_insensitive_dict from .. import models as _models -from .._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize -from .._serialization import Serializer -from .._vendor import BatchClientMixinABC, prep_if_match, prep_if_none_match - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore +from .._configuration import BatchClientConfiguration +from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from .._utils.serialization import Serializer +from .._utils.utils import ClientMixinABC, prep_if_match, prep_if_none_match + T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] @@ -478,11 +475,11 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -560,11 +557,11 @@ def build_batch_resize_pool_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -690,11 +687,11 @@ def build_batch_remove_nodes_request( # Construct headers if ocpdate is not None: 
_headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -1013,11 +1010,11 @@ def build_batch_disable_job_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -1114,11 +1111,11 @@ def build_batch_terminate_job_request( # Construct headers if ocpdate is not None: _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: @@ -3298,7 +3295,9 @@ def build_batch_list_node_files_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-many-public-methods +class BatchClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], BatchClientConfiguration] +): @distributed_trace def list_applications( @@ -3387,7 +3386,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -3610,7 +3609,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or 
None, iter(list_of_elem) @@ -3814,7 +3813,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPool], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -5101,7 +5100,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -5214,7 +5213,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6249,7 +6248,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6375,7 +6374,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6503,7 +6502,9 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6785,7 +6786,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -6991,7 +6992,7 @@ def get_certificate( ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any - ) -> _models.GetCertificateResponse: + ) -> _models.BatchCertificate: """Gets information about the specified Certificate. 
:param thumbprint_algorithm: The algorithm used to derive the thumbprint parameter. This must @@ -7009,8 +7010,8 @@ def get_certificate( :paramtype ocpdate: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] - :return: GetCertificateResponse. The GetCertificateResponse is compatible with MutableMapping - :rtype: ~azure.batch.models.GetCertificateResponse + :return: BatchCertificate. The BatchCertificate is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchCertificate :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -7024,7 +7025,7 @@ def get_certificate( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetCertificateResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCertificate] = kwargs.pop("cls", None) _request = build_batch_get_certificate_request( thumbprint_algorithm=thumbprint_algorithm, @@ -7067,7 +7068,7 @@ def get_certificate( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetCertificateResponse, response.json()) + deserialized = _deserialize(_models.BatchCertificate, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -8143,7 +8144,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -8357,7 +8358,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchTask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -8936,7 +8937,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -9389,10 +9390,8 @@ def get_task_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace - def _get_task_file_properties_internal( + def get_task_file_properties( self, job_id: str, task_id: str, @@ -9597,7 +9596,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -10811,7 +10810,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - 
list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNode], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -11028,7 +11027,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) @@ -11256,10 +11255,8 @@ def get_node_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace - def _get_node_file_properties_internal( + def get_node_file_properties( self, pool_id: str, node_id: str, @@ -11462,7 +11459,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, iter(list_of_elem) diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py index 4e0857b30791..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py @@ -1,391 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -from typing import Any, Deque, List, Optional, Iterable, Iterator, overload -import collections -import logging -import threading +from typing import List -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.rest import HttpResponse -from azure.core.tracing.decorator import distributed_trace - -from .. 
import models as _models -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - # create_task_collection renamed - @distributed_trace - def create_tasks( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrencies: number of threads to use in parallel when adding tasks. If specified - and greater than 0, will start additional threads to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrencies: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) - - # deque operations(append/pop) are thread-safe - results_queue: Deque[_models.BatchTaskAddResult] = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - self, job_id=job_id, task_collection=task_collection, **kwargs - ) - - # multi-threaded behavior - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - active_threads = [] - for i in range(concurrencies): - active_threads.append( - threading.Thread( - target=task_workflow_manager.task_collection_thread_handler, - args=(results_queue,), - ) - ) - active_threads[-1].start() - for thread in active_threads: - thread.join() - # single-threaded behavior - else: - task_workflow_manager.task_collection_thread_handler(results_queue) - - # Only define error if all threads have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - @distributed_trace - def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> Iterator[bytes]: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. 
You - will have to context manage the returned stream. - :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_node_file(*args, **kwargs) - - @distributed_trace - def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_node_file_properties_internal( # type: ignore - pool_id, - node_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
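The `cls` hook above exists because the service reports file metadata only through response headers; the override folds `ocp-batch-file-*`, `Last-Modified`, and `Content-Length` into a `BatchFileProperties` model. A hedged usage sketch with placeholder IDs:

```python
# Fetch metadata for a node file without downloading its content. The
# attributes correspond to the header-to-model mapping in the diff above.
props = client.get_node_file_properties("mypool", "node-0", "startup/stdout.txt")
print(props.content_length, props.last_modified, props.is_directory)
```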
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_task_file_properties_internal( # type: ignore - job_id, - task_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> Iterator[bytes]: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
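The `ocp_range` keyword documented above uses the `bytes=startRange-endRange` form. A short sketch (placeholder job/task IDs) that reads only the first kilobyte of a Task file:

```python
# Ranged read: request bytes 0-1023 of the file instead of the whole body.
head = client.get_task_file("job-1", "task-1", "stdout.txt", ocp_range="bytes=0-1023")
first_kb = b"".join(head)  # the call streams, so join the chunks
```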
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_task_file(*args, **kwargs) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -395,174 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -class _TaskWorkflowManager: - """Worker class for one create_task_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`BatchTaskAddCollectionResult - ` - """ - - def __init__( - self, - batch_client: BatchClientOperationsMixin, - job_id: str, - task_collection: Iterable[_models.BatchTaskCreateContent], - **kwargs - ): - # Append operations thread safe - Only read once all threads have completed - # List of tasks which failed to add due to a returned client error - self.failure_tasks: Deque[_models.BatchTaskAddResult] = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors: Deque[Any] = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - self._error_lock = threading.Lock() - self._max_tasks_lock = threading.Lock() - self._pending_queue_lock = threading.Lock() - - # Variables to be used for task create_task_collection requests - self._batch_client = batch_client - self._job_id = job_id - - self._kwargs = kwargs - - def _bulk_add_tasks(self, results_queue, chunk_tasks_to_add): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
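For context, the worker loop described above shares one pending deque across threads and carves off bounded chunks per request. A condensed sketch of that core (names mirror the fields of `_TaskWorkflowManager`, but this is a sketch, not the shipped implementation):

```python
import collections
import threading

MAX_TASKS_PER_REQUEST = 100  # same ceiling as the module constant above

def pop_chunk(pending: collections.deque, lock: threading.Lock,
              max_tasks: int = MAX_TASKS_PER_REQUEST) -> list:
    # Mirrors the _pending_queue_lock usage: take at most max_tasks items
    # off the shared queue so each service call stays bounded.
    chunk = []
    with lock:
        while pending and len(chunk) < max_tasks:
            chunk.append(pending.pop())
    return chunk
```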
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~BatchTaskAddResult] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - self._batch_client.create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskGroup(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - # Restrict one thread at a time to do this compare and set, - # therefore forcing max_tasks_per_request to be strictly decreasing - with self._max_tasks_lock: - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another thread . 
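The comments in the handler above describe the `RequestBodyTooLarge` recovery: halve the rejected chunk, requeue the back half, retry the front half, and ratchet the shared chunk size down so it only ever decreases. A standalone sketch of that arithmetic, under the same assumption the code states (tasks are roughly uniform in size):

```python
import collections

def split_oversized_chunk(chunk: list, pending: collections.deque) -> list:
    # Midpoint is lower-bounded by 1 because single-task chunks are
    # reported as errors instead of being split further.
    midpoint = len(chunk) // 2
    pending.extendleft(chunk[midpoint:])  # back half rejoins the queue
    return chunk[:midpoint]               # caller retries the front half
```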
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - try: - create_task_collection_response = create_task_collection_response.output - except AttributeError: - pass - if create_task_collection_response.value: - for task_result in create_task_collection_response.value: # pylint: disable=no-member - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - with self._pending_queue_lock: - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not (task_result.error and task_result.error.code == "TaskExists") - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - def task_collection_thread_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - with self._pending_queue_lock: - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - self._bulk_add_tasks(results_queue, chunk_tasks_to_add) - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/_patch.py b/sdk/batch/azure-batch/azure/batch/_patch.py index a9f1f6eeca2a..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_patch.py @@ -1,162 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import base64 -import hmac -import hashlib -import importlib -from datetime import datetime -from typing import TYPE_CHECKING, TypeVar, Any, Union +from typing import List -from azure.core.pipeline.policies import SansIOHTTPPolicy -from azure.core.credentials import AzureNamedKeyCredential, TokenCredential -from azure.core.pipeline import PipelineResponse, PipelineRequest -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest - -from ._client import BatchClient as GenerateBatchClient -from ._serialization import ( - Serializer, - TZ_UTC, -) - -try: - from urlparse import urlparse, parse_qs -except ImportError: - from urllib.parse import urlparse, parse_qs -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Optional, TypeVar, Union - - from azure.core.credentials import TokenCredential - from azure.core.pipeline import PipelineRequest - - ClientType = TypeVar("ClientType", bound="BatchClient") - T = TypeVar("T") - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - - -class BatchSharedKeyAuthPolicy(SansIOHTTPPolicy): - - headers_to_sign = [ - "content-encoding", - "content-language", - "content-length", - "content-md5", - "content-type", - "date", - "if-modified-since", - "if-match", - "if-none-match", - "if-unmodified-since", - "range", - ] - - def __init__(self, credential: AzureNamedKeyCredential): - super(BatchSharedKeyAuthPolicy, self).__init__() - self._account_name = credential.named_key.name - self._key = credential.named_key.key - - def on_request(self, request: PipelineRequest): - if not request.http_request.headers.get("ocp-date"): - now = datetime.utcnow() - now = now.replace(tzinfo=TZ_UTC) - request.http_request.headers["ocp-date"] = Serializer.serialize_rfc(now) - url = urlparse(request.http_request.url) - uri_path = url.path - - # method to sign - string_to_sign = request.http_request.method + "\n" - - # get headers to sign - request_header_dict = {key.lower(): val for key, val in request.http_request.headers.items() if val} - - if request.http_request.method not in ["GET", "HEAD"]: - if "content-length" not in request_header_dict: - request_header_dict["content-length"] = "0" - - request_headers = [str(request_header_dict.get(x, "")) for x in self.headers_to_sign] - - string_to_sign += "\n".join(request_headers) + "\n" - - # get ocp- header to sign - ocp_headers = [] - for name, value in request.http_request.headers.items(): - if "ocp-" in name and value: - ocp_headers.append((name.lower(), value)) - for name, value in sorted(ocp_headers): - string_to_sign += "{}:{}\n".format(name, value) - # get account_name and uri path to sign - string_to_sign += "/{}{}".format(self._account_name, uri_path) - - # get query string to sign if it is not table service - query_to_sign = parse_qs(url.query) - - for name in sorted(query_to_sign.keys()): - value = query_to_sign[name][0] - if value: - string_to_sign += "\n{}:{}".format(name, value) - # sign the request - auth_string = "SharedKey {}:{}".format(self._account_name, self._sign_string(string_to_sign)) - - request.http_request.headers["Authorization"] = auth_string - - return super().on_request(request) - - def _sign_string(self, string_to_sign): - - _key = self._key.encode("utf-8") 
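For readers following the removed policy above: the `Authorization` value is an HMAC-SHA256 over the canonicalized request text, keyed by the base64-decoded account key. A condensed restatement of `_sign_string` as a free function:

```python
import base64
import hashlib
import hmac

def sign(account_key: str, string_to_sign: str) -> str:
    # Decode the base64 account key, MAC the canonical string, re-encode.
    key = base64.b64decode(account_key.encode("utf-8"))
    digest = hmac.HMAC(key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
    return base64.b64encode(digest).decode("utf-8")
```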
- string_to_sign = string_to_sign.encode("utf-8") - - try: - key = base64.b64decode(_key) - except TypeError: - raise ValueError("Invalid key value: {}".format(self._key)) - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - - return base64.b64encode(digest).decode("utf-8") - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Web PubSub service instance. - :type endpoint: str - :param hub: Target hub name, which should start with alphabetic characters and only contain - alpha-numeric characters or underscore. - :type hub: str - :param credentials: Credential needed for the client to connect to Azure. - :type credentials: ~azure.identity.ClientSecretCredential, ~azure.core.credentials.AzureNamedKeyCredential, - or ~azure.identity.TokenCredentials - :keyword api_version: Api Version. The default value is "2021-10-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: Union[AzureNamedKeyCredential, TokenCredential], **kwargs): - super().__init__( - endpoint=endpoint, - credential=credential, # type: ignore - authentication_policy=kwargs.pop( - "authentication_policy", self._format_shared_key_credential("", credential) - ), - **kwargs - ) - - def _format_shared_key_credential(self, account_name, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/_utils/__init__.py b/sdk/batch/azure-batch/azure/batch/_utils/__init__.py new file mode 100644 index 000000000000..8026245c2abc --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_utils/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- diff --git a/sdk/batch/azure-batch/azure/batch/_model_base.py b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py similarity index 98% rename from sdk/batch/azure-batch/azure/batch/_model_base.py rename to sdk/batch/azure-batch/azure/batch/_utils/model_base.py index 3072ee252ed9..49d5c7259389 100644 --- a/sdk/batch/azure-batch/azure/batch/_model_base.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py @@ -2,8 +2,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
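The wrapper class above picks the auth pipeline from the credential type: an `AzureNamedKeyCredential` is routed through `BatchSharedKeyAuthPolicy`, while anything else (e.g., a `TokenCredential`) returns `None` and keeps the default bearer-token policy. A hypothetical construction sketch; the account name, key, and endpoint are placeholders:

```python
from azure.core.credentials import AzureNamedKeyCredential

credential = AzureNamedKeyCredential("myaccount", "c2VjcmV0LWtleQ==")  # placeholder base64 key
client = BatchClient("https://myaccount.eastus.batch.azure.com", credential)
```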
# -------------------------------------------------------------------------- # pylint: disable=protected-access, broad-except @@ -21,6 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET +from collections.abc import MutableMapping from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -28,11 +30,6 @@ from azure.core.pipeline import PipelineResponse from azure.core.serialization import _Null -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping - _LOGGER = logging.getLogger(__name__) __all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] @@ -347,7 +344,7 @@ def _get_model(module_name: str, model_name: str): _UNSET = object() -class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object +class _MyMutableMapping(MutableMapping[str, typing.Any]): def __init__(self, data: typing.Dict[str, typing.Any]) -> None: self._data = data @@ -407,13 +404,13 @@ def get(self, key: str, default: typing.Any = None) -> typing.Any: return default @typing.overload - def pop(self, key: str) -> typing.Any: ... + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ @typing.overload - def pop(self, key: str, default: _T) -> _T: ... + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: ... + def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: """ @@ -443,7 +440,7 @@ def clear(self) -> None: """ self._data.clear() - def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ """ Updates D from mapping/iterable E and F. :param any args: Either a mapping object or an iterable of key-value pairs. @@ -454,7 +451,7 @@ def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: def setdefault(self, key: str, default: None = None) -> None: ... @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... 
# pylint: disable=signature-differs def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: """ @@ -644,7 +641,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) # pylint: disable=no-value-for-parameter + return super().__new__(cls) def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -680,7 +677,7 @@ def _deserialize(cls, data, exist_discriminators): discriminator_value = data.find(xml_name).text # pyright: ignore else: discriminator_value = data.get(discriminator._rest_name) - mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member return mapped_cls._deserialize(data, exist_discriminators) def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: diff --git a/sdk/batch/azure-batch/azure/batch/_serialization.py b/sdk/batch/azure-batch/azure/batch/_utils/serialization.py similarity index 98% rename from sdk/batch/azure-batch/azure/batch/_serialization.py rename to sdk/batch/azure-batch/azure/batch/_utils/serialization.py index 7a0232de5ddc..eb86ea23c965 100644 --- a/sdk/batch/azure-batch/azure/batch/_serialization.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/serialization.py @@ -1,28 +1,10 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- -# # Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pyright: reportUnnecessaryTypeIgnoreComment=false diff --git a/sdk/batch/azure-batch/azure/batch/_vendor.py b/sdk/batch/azure-batch/azure/batch/_utils/utils.py similarity index 84% rename from sdk/batch/azure-batch/azure/batch/_vendor.py rename to sdk/batch/azure-batch/azure/batch/_utils/utils.py index 396a0128421a..927adb7c8ae2 100644 --- a/sdk/batch/azure-batch/azure/batch/_vendor.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/utils.py @@ -6,23 +6,23 @@ # -------------------------------------------------------------------------- from abc import ABC -from typing import Optional, TYPE_CHECKING +from typing import Generic, Optional, TYPE_CHECKING, TypeVar from azure.core import MatchConditions -from ._configuration import BatchClientConfiguration - if TYPE_CHECKING: - from azure.core import PipelineClient + from .serialization import Deserializer, Serializer + - from ._serialization import Deserializer, Serializer +TClient = TypeVar("TClient") +TConfig = TypeVar("TConfig") -class BatchClientMixinABC(ABC): +class ClientMixinABC(ABC, Generic[TClient, TConfig]): """DO NOT use this class. It is for internal typing use only.""" - _client: "PipelineClient" - _config: BatchClientConfiguration + _client: TClient + _config: TConfig _serialize: "Serializer" _deserialize: "Deserializer" diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index 0068a9979d9d..be71c81bd282 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "15.0.0b2" +VERSION = "1.0.0b1" diff --git a/sdk/batch/azure-batch/azure/batch/aio/_client.py b/sdk/batch/azure-batch/azure/batch/aio/_client.py index 6a2133679d27..6a1093347c66 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_client.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_client.py @@ -14,7 +14,7 @@ from azure.core.pipeline import policies from azure.core.rest import AsyncHttpResponse, HttpRequest -from .._serialization import Deserializer, Serializer +from .._utils.serialization import Deserializer, Serializer from ._configuration import BatchClientConfiguration from ._operations import BatchClientOperationsMixin @@ -39,6 +39,7 @@ class BatchClient(BatchClientOperationsMixin): def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: _endpoint = "{endpoint}" self._config = BatchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + kwargs["request_id_header_name"] = "client-request-id" _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py index ddda2a1d449d..d6691e1bc695 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py @@ -6,13 +6,13 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime import json -import sys from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, List, Optional, TypeVar import urllib.parse -from azure.core import MatchConditions +from azure.core import AsyncPipelineClient, MatchConditions from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,7 +32,6 @@ from azure.core.utils import case_insensitive_dict from ... import models as _models -from ..._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._operations._operations import ( build_batch_cancel_certificate_deletion_request, build_batch_create_certificate_request, @@ -112,17 +111,17 @@ build_batch_update_pool_request, build_batch_upload_node_logs_request, ) -from .._vendor import BatchClientMixinABC +from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._utils.utils import ClientMixinABC +from .._configuration import BatchClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-many-public-methods +class BatchClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], BatchClientConfiguration] +): @distributed_trace def list_applications( @@ -211,7 +210,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -434,7 +433,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -638,7 +637,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPool], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -1925,7 +1924,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, 
AsyncList(list_of_elem) @@ -2038,7 +2037,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3073,7 +3072,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3199,7 +3198,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJob], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3327,7 +3326,9 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + list_of_elem = _deserialize( + List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized.get("value", []) + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3609,7 +3610,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchCertificate], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -3815,7 +3816,7 @@ async def get_certificate( ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any - ) -> _models.GetCertificateResponse: + ) -> _models.BatchCertificate: """Gets information about the specified Certificate. :param thumbprint_algorithm: The algorithm used to derive the thumbprint parameter. This must @@ -3833,8 +3834,8 @@ async def get_certificate( :paramtype ocpdate: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] - :return: GetCertificateResponse. The GetCertificateResponse is compatible with MutableMapping - :rtype: ~azure.batch.models.GetCertificateResponse + :return: BatchCertificate. 
The BatchCertificate is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchCertificate :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -3848,7 +3849,7 @@ async def get_certificate( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetCertificateResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.BatchCertificate] = kwargs.pop("cls", None) _request = build_batch_get_certificate_request( thumbprint_algorithm=thumbprint_algorithm, @@ -3891,7 +3892,7 @@ async def get_certificate( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetCertificateResponse, response.json()) + deserialized = _deserialize(_models.BatchCertificate, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -4967,7 +4968,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -5181,7 +5182,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchTask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -5760,7 +5761,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -6213,10 +6214,8 @@ async def get_task_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace_async - async def _get_task_file_properties_internal( + async def get_task_file_properties( self, job_id: str, task_id: str, @@ -6421,7 +6420,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -7635,7 +7634,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNode], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -7852,7 +7851,7 @@ def prepare_request(next_link=None): async def 
extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) @@ -8080,10 +8079,8 @@ async def get_node_file( return deserialized # type: ignore - # manually renamed - # rename will be through typespec in next version @distributed_trace_async - async def _get_node_file_properties_internal( + async def get_node_file_properties( self, pool_id: str, node_id: str, @@ -8286,7 +8283,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py index b3c12ac94cfb..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py @@ -1,523 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import datetime -import collections -import logging -from typing import Any, Deque, AsyncIterator, List, Iterable, Optional, Union +from typing import List -from azure.batch import models as _models -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - # create_task_collection renamed - @distributed_trace - async def create_tasks( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. 
If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrency: number of coroutines to use in parallel when adding tasks. If specified - and greater than 0, will start additional coroutines to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrency: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) - - results_queue: Deque[_models.BatchTaskAddResult] = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - self, job_id=job_id, task_collection=task_collection, **kwargs - ) - - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - coroutines = [] - for i in range(concurrencies): - coroutines.append(task_workflow_manager.task_collection_handler(results_queue)) - await asyncio.gather(*coroutines) - else: - await task_workflow_manager.task_collection_handler(results_queue) - - # Only define error if all coroutines have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - @distributed_trace - async def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
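The async variant above replaces worker threads with `concurrencies` identical coroutines draining a shared deque via `asyncio.gather`. A toy sketch of that fan-out shape; the inner `await` stands in for one `create_task_collection` round-trip:

```python
import asyncio
import collections

async def drain(pending: collections.deque, results: collections.deque,
                workers: int = 4) -> None:
    async def worker() -> None:
        while pending:
            item = pending.pop()       # deque pops are atomic
            await asyncio.sleep(0)     # placeholder for the awaited service call
            results.appendleft(item)

    # N identical workers cooperatively empty the same queue.
    await asyncio.gather(*(worker() for _ in range(workers)))
```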
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_node_file(*args, **kwargs) - - @distributed_trace - async def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
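As with the sync version, the async override pins `stream=True`, so the awaited call yields an `AsyncIterator[bytes]`. A minimal consumption sketch, again with placeholder arguments:

```python
async def read_node_file(client, pool_id: str, node_id: str, path: str) -> bytes:
    # The override returns a stream; consume it fully before the client closes.
    stream = await client.get_node_file(pool_id, node_id, path)
    return b"".join([chunk async for chunk in stream])
```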
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_node_file_properties_internal( # type: ignore - pool_id, - node_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - async def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchFileProperties: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
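Several overrides above accept `if_modified_since`/`if_unmodified_since`. One hedged example: refresh cached properties only when the file changed. Per the generated error maps, an unchanged resource surfaces as `ResourceNotModifiedError` rather than a normal response, and this sketch assumes `last_modified` deserializes to a datetime:

```python
from azure.core.exceptions import ResourceNotModifiedError

async def maybe_refresh(client, cached_props):
    try:
        return await client.get_task_file_properties(
            "job-1", "task-1", "stdout.txt",          # placeholder IDs
            if_modified_since=cached_props.last_modified,
        )
    except ResourceNotModifiedError:
        return cached_props  # service reported the file unchanged
```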
- :return: BatchFileProperties - :rtype: ~azure.batch.models.BatchFileProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - - cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - url=headers["ocp-batch-file-url"], - is_directory=headers["ocp-batch-file-isdirectory"], - last_modified=headers["Last-Modified"], - content_length=headers["Content-Length"], - creation_time=headers["ocp-creation-time"], - # content_type=headers["Content-Type"], # need to add to typespec - file_mode=headers["ocp-batch-file-mode"], - ) - - get_response: _models.BatchFileProperties = super()._get_task_file_properties_internal( # type: ignore - job_id, - task_id, - file_path, - timeout=timeout, - ocpdate=ocpdate, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - cls=cls, - **kwargs) - - return get_response - - @distributed_trace - async def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: AsyncIterator[bytes] - :rtype: AsyncIterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "timeout": timeout, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_task_file(*args, **kwargs) - - -class _TaskWorkflowManager: - """Worker class for a single create_task_collection request. - - :param batch_client: The client used to issue create_task_collection requests. - :type batch_client: BatchClientOperationsMixin - :param str job_id: The ID of the job to which the task collection is to be - added. - :param task_collection: The collection of tasks to add. - :type task_collection: Iterable[~azure.batch.models.BatchTaskCreateContent] - :ivar tasks_to_add: The queue of tasks still waiting to be submitted. - :vartype tasks_to_add: collections.deque[~azure.batch.models.BatchTaskCreateContent] - """ - - def __init__( - self, - batch_client: BatchClientOperationsMixin, - job_id: str, - task_collection: Iterable[_models.BatchTaskCreateContent], - **kwargs - ): - # List of tasks which failed to add due to a returned client error - self.failure_tasks: Deque[_models.BatchTaskAddResult] = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors: Deque[Any] = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - - # Variables to be used for task create_task_collection requests - self._batch_client = batch_client - self._job_id = job_id - - self._kwargs = kwargs - - async def _bulk_add_tasks( - self, - results_queue: collections.deque, - chunk_tasks_to_add: List[_models.BatchTaskCreateContent], - ): - """Adds a chunk of tasks to the job. - - Retries the chunk at a smaller size if the request body exceeds the maximum - request size, and retries individual tasks that failed due to server errors.
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks to submit in one request - :type chunk_tasks_to_add: list[~azure.batch.models.BatchTaskCreateContent] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - await self._batch_client.create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskGroup(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize, split the chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # With only one task in the chunk, the task itself is misbehaved: its body - # alone exceeds the maximum request size, so it can never be added - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: tasks are relatively close in size, so if one batch exceeds the size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to the base case above - midpoint = int(len(chunk_tasks_to_add) / 2) - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Number of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution in all cases, but the goal is to handle this - # exception gracefully whenever tasks are well behaved: - # the extra tasks are appended back onto the queue for other coroutines to - # pick up, and the smaller chunk is retried here.
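- # For example, if a 40-task chunk trips "RequestBodyTooLarge", the retry below - # resubmits chunk_tasks_to_add[:20] while chunk_tasks_to_add[20:] go back onto - # tasks_to_add for other worker coroutines.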
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: # type: ignore - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - if create_task_collection_response.value: - for task_result in create_task_collection_response.value: - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not (task_result.error and task_result.error.code == "TaskExists") - ): - # Client error will be recorded unless the Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - async def task_collection_handler(self, results_queue): - """Main method for the worker to run. - - Pops chunks of tasks off the collection of pending tasks and submits them to be added. - - :param collections.deque results_queue: Queue for the worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -527,20 +19,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -def _handle_output(results_queue): - """Scan output for exceptions. - - If there is an output from an add task collection call, add it to the results. - - :param results_queue: Queue containing results of attempted create_task_collection calls - :type results_queue: collections.deque - :return: list of task add results - :rtype: list[~azure.batch.models.BatchTaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/aio/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_patch.py index 64a3f1262c22..8bcb627aa475 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_patch.py @@ -1,55 +1,15 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information.
+# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import List -from ._client import BatchClient as GenerateBatchClient -from .._patch import BatchSharedKeyAuthPolicy -from azure.core.credentials import TokenCredential - - -from azure.core.credentials import AzureNamedKeyCredential - - -from typing import Union - -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Azure Batch service instance. - :type endpoint: str - :param credential: Credential needed for the client to connect to Azure. - :type credential: ~azure.core.credentials.AzureNamedKeyCredential or - ~azure.core.credentials.TokenCredential - :keyword api_version: Api Version. Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: Union[AzureNamedKeyCredential, TokenCredential], **kwargs): - super().__init__( - endpoint=endpoint, - credential=credential, # type: ignore - authentication_policy=kwargs.pop("authentication_policy", self._format_shared_key_credential(credential)), - **kwargs - ) - - def _format_shared_key_credential(self, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py b/sdk/batch/azure-batch/azure/batch/aio/_vendor.py deleted file mode 100644 index baee6ee7264c..000000000000 --- a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py +++ /dev/null @@ -1,57 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from abc import ABC -from typing import Optional, TYPE_CHECKING - -from azure.core import MatchConditions - -from ._configuration import BatchClientConfiguration - -if TYPE_CHECKING: - from azure.core import AsyncPipelineClient - - from .._serialization import Deserializer, Serializer - - -class BatchClientMixinABC(ABC): - """DO NOT use this class.
It is for internal typing use only.""" - - _client: "AsyncPipelineClient" - _config: BatchClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" - - -def quote_etag(etag: Optional[str]) -> Optional[str]: - if not etag or etag == "*": - return etag - if etag.startswith("W/"): - return etag - if etag.startswith('"') and etag.endswith('"'): - return etag - if etag.startswith("'") and etag.endswith("'"): - return etag - return '"' + etag + '"' - - -def prep_if_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: - if match_condition == MatchConditions.IfNotModified: - if_match = quote_etag(etag) if etag else None - return if_match - if match_condition == MatchConditions.IfPresent: - return "*" - return None - - -def prep_if_none_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: - if match_condition == MatchConditions.IfModified: - if_none_match = quote_etag(etag) if etag else None - return if_none_match - if match_condition == MatchConditions.IfMissing: - return "*" - return None diff --git a/sdk/batch/azure-batch/azure/batch/models/__init__.py b/sdk/batch/azure-batch/azure/batch/models/__init__.py index c54aed6d5845..393ee8aedeb0 100644 --- a/sdk/batch/azure-batch/azure/batch/models/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/models/__init__.py @@ -123,7 +123,6 @@ ExitConditions, ExitOptions, FileProperties, - GetCertificateResponse, HttpHeader, ImageReference, InboundEndpoint, @@ -337,7 +336,6 @@ "ExitConditions", "ExitOptions", "FileProperties", - "GetCertificateResponse", "HttpHeader", "ImageReference", "InboundEndpoint", diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index 842decf5de5c..0094a5d6a250 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -11,14 +11,13 @@ import datetime from typing import Any, Dict, List, Mapping, Optional, TYPE_CHECKING, Union, overload -from .. import _model_base -from .._model_base import rest_field +from .._utils.model_base import Model as _Model, rest_field if TYPE_CHECKING: from .. import models as _models -class AffinityInfo(_model_base.Model): +class AffinityInfo(_Model): """A locality hint that can be used by the Batch service to select a Compute Node on which to start a Task. @@ -54,7 +53,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AuthenticationTokenSettings(_model_base.Model): +class AuthenticationTokenSettings(_Model): """The settings for an authentication token that the Task can use to perform Batch service operations. @@ -91,7 +90,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutomaticOsUpgradePolicy(_model_base.Model): +class AutomaticOsUpgradePolicy(_Model): """The configuration parameters used for performing automatic OS upgrade. :ivar disable_automatic_rollback: Whether OS image rollback feature should be disabled. @@ -154,7 +153,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoScaleRun(_model_base.Model): +class AutoScaleRun(_Model): """The results and errors from an execution of a Pool autoscale formula. :ivar timestamp: The time at which the autoscale formula was last evaluated. Required. 
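For illustration, a minimal sketch of the two construction overloads that every migrated model keeps after the `_model_base.Model` to `_Model` swap: keyword arguments with snake_case names, or a raw JSON mapping in the service's camelCase wire format. The wire key "affinityId" and the sample node ID are assumptions for the sketch, not taken from this diff.

    from azure.batch import models as _models

    # Keyword overload: snake_case attribute names.
    affinity = _models.AffinityInfo(affinity_id="TVM-0000000000_1")

    # Mapping overload: the raw wire format the service returns.
    same_affinity = _models.AffinityInfo({"affinityId": "TVM-0000000000_1"})

    assert affinity.affinity_id == same_affinity.affinity_id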
@@ -202,7 +201,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoScaleRunError(_model_base.Model): +class AutoScaleRunError(_Model): """An error that occurred when executing or evaluating a Pool autoscale formula. :ivar code: An identifier for the autoscale error. Codes are invariant and are intended to be @@ -246,7 +245,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoUserSpecification(_model_base.Model): +class AutoUserSpecification(_Model): """Specifies the options for the auto user that runs an Azure Batch Task. :ivar scope: The scope for the auto user. The default value is pool. If the pool is running @@ -293,7 +292,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureBlobFileSystemConfiguration(_model_base.Model): +class AzureBlobFileSystemConfiguration(_Model): """Information used to connect to an Azure Storage Container using Blobfuse. :ivar account_name: The Azure Storage Account name. Required. @@ -372,7 +371,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureFileShareConfiguration(_model_base.Model): +class AzureFileShareConfiguration(_Model): """Information used to connect to an Azure Fileshare. :ivar account_name: The Azure Storage account name. Required. @@ -431,7 +430,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchApplication(_model_base.Model): +class BatchApplication(_Model): """Contains information about an application in an Azure Batch Account. :ivar id: A string that uniquely identifies the application within the Account. Required. @@ -469,7 +468,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchApplicationPackageReference(_model_base.Model): +class BatchApplicationPackageReference(_Model): """A reference to an Package to be deployed to Compute Nodes. :ivar application_id: The ID of the application to deploy. When creating a pool, the package's @@ -516,7 +515,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchAutoPoolSpecification(_model_base.Model): +class BatchAutoPoolSpecification(_Model): """Specifies characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. @@ -583,7 +582,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchCertificate(_model_base.Model): +class BatchCertificate(_Model): """A Certificate that can be installed on Compute Nodes and can be used to authenticate operations on the machine. @@ -686,7 +685,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchCertificateReference(_model_base.Model): +class BatchCertificateReference(_Model): """A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the `Azure KeyVault Extension `_ @@ -774,7 +773,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchError(_model_base.Model): +class BatchError(_Model): """An error response received from the Azure Batch service. :ivar code: An identifier for the error. 
Codes are invariant and are intended to be consumed @@ -820,7 +819,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchErrorDetail(_model_base.Model): +class BatchErrorDetail(_Model): """An item of additional information included in an Azure Batch error response. :ivar key: An identifier specifying the meaning of the Value property. @@ -853,7 +852,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchErrorMessage(_model_base.Model): +class BatchErrorMessage(_Model): """An error message received in an Azure Batch error response. :ivar lang: The language code of the error message. @@ -886,7 +885,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJob(_model_base.Model): +class BatchJob(_Model): """An Azure Batch Job. :ivar id: A string that uniquely identifies the Job within the Account. The ID is @@ -1113,7 +1112,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobConstraints(_model_base.Model): +class BatchJobConstraints(_Model): """The execution constraints for a Job. :ivar max_wall_clock_time: The maximum elapsed time that the Job may run, measured from the @@ -1169,7 +1168,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobCreateContent(_model_base.Model): +class BatchJobCreateContent(_Model): """Parameters for creating an Azure Batch Job. :ivar id: A string that uniquely identifies the Job within the Account. The ID can contain any @@ -1381,7 +1380,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobDisableContent(_model_base.Model): +class BatchJobDisableContent(_Model): """Parameters for disabling an Azure Batch Job. :ivar disable_tasks: What to do with active Tasks associated with the Job. Required. Known @@ -1413,7 +1412,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobExecutionInfo(_model_base.Model): +class BatchJobExecutionInfo(_Model): """Contains information about the execution of a Job in the Azure Batch service. :ivar start_time: The start time of the Job. This is the time at which the Job was created. @@ -1500,7 +1499,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobManagerTask(_model_base.Model): +class BatchJobManagerTask(_Model): """Specifies details of a Job Manager Task. The Job Manager Task is automatically started when the Job is created. The Batch service tries to schedule the Job Manager Task before any other Tasks in @@ -1762,7 +1761,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobNetworkConfiguration(_model_base.Model): +class BatchJobNetworkConfiguration(_Model): """The network configuration for the Job. :ivar subnet_id: The ARM resource identifier of the virtual network subnet which Compute Nodes @@ -1839,7 +1838,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationAndReleaseTaskStatus(_model_base.Model): +class BatchJobPreparationAndReleaseTaskStatus(_Model): """The status of the Job Preparation and Job Release Tasks on a Compute Node. :ivar pool_id: The ID of the Pool containing the Compute Node to which this entry refers. 
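Because the hunks above only show the class declarations, here is a hedged sketch of how the job-related models compose when creating a job; the IDs are placeholders, and the field names are assumed from the docstrings in this file.

    from azure.batch import models as _models

    job = _models.BatchJobCreateContent(
        id="sample-job",  # placeholder job ID
        pool_info=_models.BatchPoolInfo(pool_id="sample-pool"),
        constraints=_models.BatchJobConstraints(max_task_retry_count=2),
    )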
@@ -1896,7 +1895,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationTask(_model_base.Model): +class BatchJobPreparationTask(_Model): """A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. You can use Job Preparation to prepare a Node to run Tasks for the Job. Activities commonly performed in Job Preparation include: Downloading common @@ -2086,7 +2085,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationTaskExecutionInfo(_model_base.Model): +class BatchJobPreparationTaskExecutionInfo(_Model): """Contains information about the execution of a Job Preparation Task on a Compute Node. @@ -2228,7 +2227,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobReleaseTask(_model_base.Model): +class BatchJobReleaseTask(_Model): """A Job Release Task to run on Job completion on any Compute Node where the Job has run. The Job Release Task runs when the Job ends, because of one of the following: The user calls the Terminate Job API, or the Delete Job API while the Job is @@ -2383,7 +2382,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobReleaseTaskExecutionInfo(_model_base.Model): +class BatchJobReleaseTaskExecutionInfo(_Model): """Contains information about the execution of a Job Release Task on a Compute Node. @@ -2494,7 +2493,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSchedule(_model_base.Model): +class BatchJobSchedule(_Model): """A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a specification used to create each Job. @@ -2622,7 +2621,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleConfiguration(_model_base.Model): +class BatchJobScheduleConfiguration(_Model): """The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. @@ -2726,7 +2725,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleCreateContent(_model_base.Model): +class BatchJobScheduleCreateContent(_Model): """Parameters for creating an Azure Batch Job Schedule. :ivar id: A string that uniquely identifies the schedule within the Account. The ID can contain @@ -2794,7 +2793,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleExecutionInfo(_model_base.Model): +class BatchJobScheduleExecutionInfo(_Model): """Contains information about Jobs that have been and will be run under a Job Schedule. @@ -2849,7 +2848,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleStatistics(_model_base.Model): +class BatchJobScheduleStatistics(_Model): """Resource usage statistics for a Job Schedule. :ivar url: The URL of the statistics. Required. @@ -3003,7 +3002,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleUpdateContent(_model_base.Model): +class BatchJobScheduleUpdateContent(_Model): """Parameters for updating an Azure Batch Job Schedule. :ivar schedule: The schedule according to which Jobs will be created. 
All times are fixed @@ -3057,7 +3056,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSchedulingError(_model_base.Model): +class BatchJobSchedulingError(_Model): """An error encountered by the Batch service when scheduling a Job. :ivar category: The category of the Job scheduling error. Required. Known values are: @@ -3110,7 +3109,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSpecification(_model_base.Model): +class BatchJobSpecification(_Model): """Specifies details of the Jobs to be created on a schedule. :ivar priority: The priority of Jobs created under this schedule. Priority values can range @@ -3315,7 +3314,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobStatistics(_model_base.Model): +class BatchJobStatistics(_Model): """Resource usage statistics for a Job. :ivar url: The URL of the statistics. Required. @@ -3460,7 +3459,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobTerminateContent(_model_base.Model): +class BatchJobTerminateContent(_Model): """Parameters for terminating an Azure Batch Job. :ivar termination_reason: The text you want to appear as the Job's TerminationReason. The @@ -3491,7 +3490,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobUpdateContent(_model_base.Model): +class BatchJobUpdateContent(_Model): """Parameters for updating an Azure Batch Job. :ivar priority: The priority of the Job. Priority values can range from -1000 to 1000, with @@ -3609,7 +3608,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNode(_model_base.Model): +class BatchNode(_Model): """A Compute Node in the Batch service. :ivar id: The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a @@ -3863,7 +3862,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeAgentInfo(_model_base.Model): +class BatchNodeAgentInfo(_Model): """The Batch Compute Node agent is a program that runs on each Compute Node in the Pool and provides Batch capability on the Compute Node. @@ -3908,7 +3907,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeCounts(_model_base.Model): +class BatchNodeCounts(_Model): """The number of Compute Nodes in each Compute Node state. :ivar creating: The number of Compute Nodes in the creating state. Required. @@ -4021,7 +4020,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeDeallocateContent(_model_base.Model): +class BatchNodeDeallocateContent(_Model): """Options for deallocating a Compute Node. :ivar node_deallocate_option: When to deallocate the Compute Node and what to do with currently @@ -4055,7 +4054,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeDisableSchedulingContent(_model_base.Model): +class BatchNodeDisableSchedulingContent(_Model): """Parameters for disabling scheduling on an Azure Batch Compute Node. 
:ivar node_disable_scheduling_option: What to do with currently running Tasks when disabling @@ -4089,7 +4088,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeEndpointConfiguration(_model_base.Model): +class BatchNodeEndpointConfiguration(_Model): """The endpoint configuration for the Compute Node. :ivar inbound_endpoints: The list of inbound endpoints that are accessible on the Compute Node. @@ -4120,7 +4119,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeError(_model_base.Model): +class BatchNodeError(_Model): """An error encountered by a Compute Node. :ivar code: An identifier for the Compute Node error. Codes are invariant and are intended to @@ -4164,7 +4163,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeFile(_model_base.Model): +class BatchNodeFile(_Model): """Information about a file or directory on a Compute Node. :ivar name: The file path. @@ -4211,7 +4210,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeIdentityReference(_model_base.Model): +class BatchNodeIdentityReference(_Model): """The reference to a user assigned identity associated with the Batch pool which a compute node will use. @@ -4242,7 +4241,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeInfo(_model_base.Model): +class BatchNodeInfo(_Model): """Information about the Compute Node on which a Task ran. :ivar affinity_id: An identifier for the Node on which the Task ran, which can be passed when @@ -4303,7 +4302,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodePlacementConfiguration(_model_base.Model): +class BatchNodePlacementConfiguration(_Model): """For regional placement, nodes in the pool will be allocated in the same region. For zonal placement, nodes in the pool will be spread across different zones with best effort balancing. @@ -4339,7 +4338,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeRebootContent(_model_base.Model): +class BatchNodeRebootContent(_Model): """Parameters for rebooting an Azure Batch Compute Node. :ivar node_reboot_option: When to reboot the Compute Node and what to do with currently running @@ -4373,7 +4372,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeReimageContent(_model_base.Model): +class BatchNodeReimageContent(_Model): """Parameters for reimaging an Azure Batch Compute Node. :ivar node_reimage_option: When to reimage the Compute Node and what to do with currently @@ -4407,7 +4406,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeRemoteLoginSettings(_model_base.Model): +class BatchNodeRemoteLoginSettings(_Model): """The remote login settings for a Compute Node. :ivar remote_login_ip_address: The IP address used for remote login to the Compute Node. @@ -4445,7 +4444,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeRemoveContent(_model_base.Model): +class BatchNodeRemoveContent(_Model): """Parameters for removing nodes from an Azure Batch Pool. 
:ivar node_list: A list containing the IDs of the Compute Nodes to be removed from the @@ -4499,7 +4498,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeUserCreateContent(_model_base.Model): +class BatchNodeUserCreateContent(_Model): """Parameters for creating a user account for RDP or SSH access on an Azure Batch Compute Node. :ivar name: The user name of the Account. Required. @@ -4565,7 +4564,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeUserUpdateContent(_model_base.Model): +class BatchNodeUserUpdateContent(_Model): """Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. :ivar password: The password of the Account. The password is required for Windows Compute @@ -4622,7 +4621,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeVMExtension(_model_base.Model): +class BatchNodeVMExtension(_Model): """The configuration for virtual machine extension instance view. :ivar provisioning_state: The provisioning state of the virtual machine extension. @@ -4666,7 +4665,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPool(_model_base.Model): +class BatchPool(_Model): """A Pool in the Azure Batch service. :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any @@ -4992,7 +4991,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolCreateContent(_model_base.Model): +class BatchPoolCreateContent(_Model): """Parameters for creating an Azure Batch Pool. :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any @@ -5294,7 +5293,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolEnableAutoScaleContent(_model_base.Model): +class BatchPoolEnableAutoScaleContent(_Model): """Parameters for enabling automatic scaling on an Azure Batch Pool. :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The @@ -5356,7 +5355,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolEndpointConfiguration(_model_base.Model): +class BatchPoolEndpointConfiguration(_Model): """The endpoint configuration for a Pool. :ivar inbound_nat_pools: A list of inbound NAT Pools that can be used to address specific ports @@ -5393,7 +5392,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolEvaluateAutoScaleContent(_model_base.Model): +class BatchPoolEvaluateAutoScaleContent(_Model): """Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The @@ -5433,7 +5432,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolIdentity(_model_base.Model): +class BatchPoolIdentity(_Model): """The identity of the Batch pool, if configured. :ivar type: The identity of the Batch pool, if configured. The list of user identities @@ -5481,7 +5480,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolInfo(_model_base.Model): +class BatchPoolInfo(_Model): """Specifies how a Job should be assigned to a Pool. 
:ivar pool_id: The ID of an existing Pool. All the Tasks of the Job will run on the specified @@ -5538,7 +5537,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolNodeCounts(_model_base.Model): +class BatchPoolNodeCounts(_Model): """The number of Compute Nodes in each state for a Pool. :ivar pool_id: The ID of the Pool. Required. @@ -5580,7 +5579,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolReplaceContent(_model_base.Model): +class BatchPoolReplaceContent(_Model): """Parameters for replacing properties on an Azure Batch Pool. :ivar start_task: A Task to run on each Compute Node as it joins the Pool. The Task runs when @@ -5690,7 +5689,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolResizeContent(_model_base.Model): +class BatchPoolResizeContent(_Model): """Parameters for changing the size of an Azure Batch Pool. :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. @@ -5752,7 +5751,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolResourceStatistics(_model_base.Model): +class BatchPoolResourceStatistics(_Model): """Statistics related to resource consumption by Compute Nodes in a Pool. :ivar start_time: The start time of the time range covered by the statistics. Required. @@ -5871,7 +5870,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolSpecification(_model_base.Model): +class BatchPoolSpecification(_Model): """Specification for creating a new Pool. :ivar display_name: The display name for the Pool. The display name need not be unique and can @@ -6151,7 +6150,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolStatistics(_model_base.Model): +class BatchPoolStatistics(_Model): """Contains utilization and resource usage statistics for the lifetime of a Pool. :ivar url: The URL for the statistics. Required. @@ -6209,7 +6208,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolUpdateContent(_model_base.Model): +class BatchPoolUpdateContent(_Model): """Parameters for updating an Azure Batch Pool. :ivar display_name: The display name for the Pool. The display name need not be unique and can @@ -6439,7 +6438,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolUsageMetrics(_model_base.Model): +class BatchPoolUsageMetrics(_Model): """Usage metrics for a Pool across an aggregation interval. :ivar pool_id: The ID of the Pool whose metrics are aggregated in this entry. Required. @@ -6501,7 +6500,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolUsageStatistics(_model_base.Model): +class BatchPoolUsageStatistics(_Model): """Statistics related to Pool usage information. :ivar start_time: The start time of the time range covered by the statistics. Required. @@ -6549,7 +6548,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchStartTask(_model_base.Model): +class BatchStartTask(_Model): """Batch will retry Tasks when a recovery operation is triggered on a Node. 
Examples of recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host failure. @@ -6700,7 +6699,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchStartTaskInfo(_model_base.Model): +class BatchStartTaskInfo(_Model): """Information about a StartTask running on a Compute Node. :ivar state: The state of the StartTask on the Compute Node. Required. Known values are: @@ -6825,7 +6824,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchSubtask(_model_base.Model): +class BatchSubtask(_Model): """Information about an Azure Batch subtask. :ivar id: The ID of the subtask. @@ -6957,7 +6956,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchSupportedImage(_model_base.Model): +class BatchSupportedImage(_Model): """A reference to the Azure Virtual Machines Marketplace Image and additional information about the Image. @@ -7033,7 +7032,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTask(_model_base.Model): +class BatchTask(_Model): """Batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host failure. @@ -7297,7 +7296,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskAddCollectionResult(_model_base.Model): +class BatchTaskAddCollectionResult(_Model): """The result of adding a collection of Tasks to a Job. :ivar value: The results of the add Task collection operation. @@ -7327,7 +7326,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskAddResult(_model_base.Model): +class BatchTaskAddResult(_Model): """Result for a single Task added as part of an add Task collection operation. :ivar status: The status of the add Task request. Required. Known values are: "success", @@ -7392,7 +7391,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskConstraints(_model_base.Model): +class BatchTaskConstraints(_Model): """Execution constraints to apply to a Task. :ivar max_wall_clock_time: The maximum elapsed time that the Task may run, measured from the @@ -7460,7 +7459,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskContainerExecutionInfo(_model_base.Model): +class BatchTaskContainerExecutionInfo(_Model): """Contains information about the container which a Task is executing. :ivar container_id: The ID of the container. @@ -7506,7 +7505,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskContainerSettings(_model_base.Model): +class BatchTaskContainerSettings(_Model): """The container settings for a Task. :ivar container_run_options: Additional options to the container create command. These @@ -7580,7 +7579,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCounts(_model_base.Model): +class BatchTaskCounts(_Model): """The Task counts for a Job. :ivar active: The number of Tasks in the active state. Required. 
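A short, hedged example of the task execution constraints documented above; the timedelta values are illustrative and are serialized as ISO 8601 durations on the wire.

    import datetime
    from azure.batch import models as _models

    constraints = _models.BatchTaskConstraints(
        max_wall_clock_time=datetime.timedelta(hours=1),  # "PT1H" on the wire
        retention_time=datetime.timedelta(days=7),        # "P7D" on the wire
        max_task_retry_count=3,
    )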
@@ -7632,7 +7631,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCountsResult(_model_base.Model): +class BatchTaskCountsResult(_Model): """The Task and TaskSlot counts for a Job. :ivar task_counts: The number of Tasks per state. Required. @@ -7669,7 +7668,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCreateContent(_model_base.Model): +class BatchTaskCreateContent(_Model): """Parameters for creating an Azure Batch Task. :ivar id: A string that uniquely identifies the Task within the Job. The ID can contain any @@ -7905,7 +7904,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskDependencies(_model_base.Model): +class BatchTaskDependencies(_Model): """Specifies any dependencies of a Task. Any Task that is explicitly specified or within a dependency range must complete before the dependant Task will be scheduled. @@ -7954,7 +7953,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskExecutionInfo(_model_base.Model): +class BatchTaskExecutionInfo(_Model): """Information about the execution of a Task. :ivar start_time: The time at which the Task started running. 'Running' corresponds to the @@ -8094,7 +8093,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskFailureInfo(_model_base.Model): +class BatchTaskFailureInfo(_Model): """Information about a Task failure. :ivar category: The category of the Task error. Required. Known values are: "usererror" and @@ -8145,7 +8144,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskGroup(_model_base.Model): +class BatchTaskGroup(_Model): """A collection of Azure Batch Tasks to add. :ivar value: The collection of Tasks to add. The maximum count of Tasks is 100. The total @@ -8181,7 +8180,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskIdRange(_model_base.Model): +class BatchTaskIdRange(_Model): """The start and end of the range are inclusive. For example, if a range has start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. @@ -8215,7 +8214,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskInfo(_model_base.Model): +class BatchTaskInfo(_Model): """Information about a Task running on a Compute Node. :ivar task_url: The URL of the Task. @@ -8274,7 +8273,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskSchedulingPolicy(_model_base.Model): +class BatchTaskSchedulingPolicy(_Model): """Specifies how Tasks should be distributed across Compute Nodes. :ivar node_fill_type: How Tasks are distributed across Compute Nodes in a Pool. If not @@ -8306,7 +8305,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskSlotCounts(_model_base.Model): +class BatchTaskSlotCounts(_Model): """The TaskSlot counts for a Job. :ivar active: The number of TaskSlots for active Tasks. Required. @@ -8354,7 +8353,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskStatistics(_model_base.Model): +class BatchTaskStatistics(_Model): """Resource usage statistics for a Task. :ivar url: The URL of the statistics. Required. 
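To make the 100-task limit on BatchTaskGroup concrete, a minimal sketch; the task IDs and command line are placeholders.

    from azure.batch import models as _models

    tasks = [
        _models.BatchTaskCreateContent(id=f"task-{i}", command_line="cmd /c echo hello")
        for i in range(100)  # a single BatchTaskGroup may carry at most 100 tasks
    ]
    group = _models.BatchTaskGroup(value=tasks)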
@@ -8465,7 +8464,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class CifsMountConfiguration(_model_base.Model): +class CifsMountConfiguration(_Model): """Information used to connect to a CIFS file system. :ivar username: The user to use for authentication against the CIFS file system. Required. @@ -8523,7 +8522,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ContainerConfiguration(_model_base.Model): +class ContainerConfiguration(_Model): """The configuration for container-enabled Pools. :ivar type: The container technology to be used. Required. Known values are: "dockerCompatible" @@ -8575,7 +8574,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ContainerHostBatchBindMountEntry(_model_base.Model): +class ContainerHostBatchBindMountEntry(_Model): """The entry of path and mount mode you want to mount into task container. :ivar source: The path which be mounted to container customer can select. Known values are: @@ -8621,7 +8620,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ContainerRegistryReference(_model_base.Model): +class ContainerRegistryReference(_Model): """A private container registry. :ivar username: The user name to log into the registry server. @@ -8670,7 +8669,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DataDisk(_model_base.Model): +class DataDisk(_Model): """Settings which will be used by the data disks associated to Compute Nodes in the Pool. When using attached data disks, you need to mount and format the disks from within a VM to use them. @@ -8734,7 +8733,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DeleteBatchCertificateError(_model_base.Model): +class DeleteBatchCertificateError(_Model): """An error encountered by the Batch service when deleting a Certificate. :ivar code: An identifier for the Certificate deletion error. Codes are invariant and are @@ -8784,7 +8783,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DiffDiskSettings(_model_base.Model): +class DiffDiskSettings(_Model): """Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). @@ -8833,7 +8832,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DiskEncryptionConfiguration(_model_base.Model): +class DiskEncryptionConfiguration(_Model): """The disk encryption configuration applied on compute nodes in the pool. Disk encryption configuration is not supported on Linux pool created with Azure Compute Gallery Image. @@ -8867,7 +8866,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class EnvironmentSetting(_model_base.Model): +class EnvironmentSetting(_Model): """An environment variable to be set on a Task process. :ivar name: The name of the environment variable. Required. @@ -8900,7 +8899,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitCodeMapping(_model_base.Model): +class ExitCodeMapping(_Model): """How the Batch service should respond if a Task exits with a particular exit code. 
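A hedged sketch tying together the container models above; the registry server, image name, and run options are placeholders, not values from this diff.

    from azure.batch import models as _models

    container = _models.BatchTaskContainerSettings(
        image_name="myregistry.azurecr.io/batch/worker:1.0",  # placeholder image
        container_run_options="--rm --workdir /app",          # extra container create options
        registry=_models.ContainerRegistryReference(
            registry_server="myregistry.azurecr.io",
            username="myregistry",
            password="<placeholder>",
        ),
    )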
@@ -8937,7 +8936,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitCodeRangeMapping(_model_base.Model): +class ExitCodeRangeMapping(_Model): """A range of exit codes and how the Batch service should respond to exit codes within that range. @@ -8980,7 +8979,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitConditions(_model_base.Model): +class ExitConditions(_Model): """Specifies how the Batch service should respond when the Task completes. :ivar exit_codes: A list of individual Task exit codes and how the Batch service should respond @@ -9053,7 +9052,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ExitOptions(_model_base.Model): +class ExitOptions(_Model): """Specifies how the Batch service responds to a particular exit condition. :ivar job_action: An action to take on the Job containing the Task, if the Task completes with @@ -9107,7 +9106,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class FileProperties(_model_base.Model): +class FileProperties(_Model): """The properties of a file on a Compute Node. :ivar creation_time: The file creation time. The creation time is not returned for files on @@ -9166,89 +9165,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class GetCertificateResponse(_model_base.Model): - """GetCertificateResponse. - - :ivar thumbprint: The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex - digits (it may include spaces but these are removed). Required. - :vartype thumbprint: str - :ivar thumbprint_algorithm: The algorithm used to derive the thumbprint. This must be sha1. - Required. - :vartype thumbprint_algorithm: str - :ivar url: The URL of the Certificate. - :vartype url: str - :ivar state: The state of the Certificate. Known values are: "active", "deleting", and - "deletefailed". - :vartype state: str or ~azure.batch.models.BatchCertificateState - :ivar state_transition_time: The time at which the Certificate entered its current state. - :vartype state_transition_time: ~datetime.datetime - :ivar previous_state: The previous state of the Certificate. This property is not set if the - Certificate is in its initial active state. Known values are: "active", "deleting", and - "deletefailed". - :vartype previous_state: str or ~azure.batch.models.BatchCertificateState - :ivar previous_state_transition_time: The time at which the Certificate entered its previous - state. This property is not set if the Certificate is in its initial Active state. - :vartype previous_state_transition_time: ~datetime.datetime - :ivar public_data: The public part of the Certificate as a base-64 encoded .cer file. - :vartype public_data: str - :ivar delete_certificate_error: The error that occurred on the last attempt to delete this - Certificate. This property is set only if the Certificate is in the DeleteFailed state. - :vartype delete_certificate_error: ~azure.batch.models.DeleteBatchCertificateError - """ - - thumbprint: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may - include spaces but these are removed). 
Required.""" - thumbprint_algorithm: str = rest_field( - name="thumbprintAlgorithm", visibility=["read", "create", "update", "delete", "query"] - ) - """The algorithm used to derive the thumbprint. This must be sha1. Required.""" - url: Optional[str] = rest_field(visibility=["read"]) - """The URL of the Certificate.""" - state: Optional[Union[str, "_models.BatchCertificateState"]] = rest_field(visibility=["read"]) - """The state of the Certificate. Known values are: \"active\", \"deleting\", and \"deletefailed\".""" - state_transition_time: Optional[datetime.datetime] = rest_field( - name="stateTransitionTime", visibility=["read"], format="rfc3339" - ) - """The time at which the Certificate entered its current state.""" - previous_state: Optional[Union[str, "_models.BatchCertificateState"]] = rest_field( - name="previousState", visibility=["read"] - ) - """The previous state of the Certificate. This property is not set if the Certificate is in its - initial active state. Known values are: \"active\", \"deleting\", and \"deletefailed\".""" - previous_state_transition_time: Optional[datetime.datetime] = rest_field( - name="previousStateTransitionTime", visibility=["read"], format="rfc3339" - ) - """The time at which the Certificate entered its previous state. This property is not set if the - Certificate is in its initial Active state.""" - public_data: Optional[str] = rest_field(name="publicData", visibility=["read"]) - """The public part of the Certificate as a base-64 encoded .cer file.""" - delete_certificate_error: Optional["_models.DeleteBatchCertificateError"] = rest_field( - name="deleteCertificateError", visibility=["read"] - ) - """The error that occurred on the last attempt to delete this Certificate. This property is set - only if the Certificate is in the DeleteFailed state.""" - - @overload - def __init__( - self, - *, - thumbprint: str, - thumbprint_algorithm: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class HttpHeader(_model_base.Model): +class HttpHeader(_Model): """An HTTP header name-value pair. :ivar name: The case-insensitive name of the header to be used while uploading output files. @@ -9282,7 +9199,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ImageReference(_model_base.Model): +class ImageReference(_Model): """A reference to an Azure Virtual Machines Marketplace Image or a Azure Compute Gallery Image. To get the list of all Azure Marketplace Image references verified by Azure Batch, see the ' List Supported Images ' operation. @@ -9391,7 +9308,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class InboundEndpoint(_model_base.Model): +class InboundEndpoint(_Model): """An inbound endpoint on a Compute Node. :ivar name: The name of the endpoint. Required. @@ -9448,7 +9365,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class InboundNatPool(_model_base.Model): +class InboundNatPool(_Model): """A inbound NAT Pool that can be used to address specific ports on Compute Nodes in a Batch Pool externally. 
@@ -9548,7 +9465,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class InstanceViewStatus(_model_base.Model): +class InstanceViewStatus(_Model): """The instance view status. :ivar code: The status code. @@ -9602,7 +9519,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class LinuxUserConfiguration(_model_base.Model): +class LinuxUserConfiguration(_Model): """Properties used to create a user Account on a Linux Compute Node. :ivar uid: The user ID of the user Account. The uid and gid properties must be specified @@ -9657,7 +9574,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ManagedDisk(_model_base.Model): +class ManagedDisk(_Model): """The managed disk parameters. :ivar storage_account_type: The storage account type for managed disk. Known values are: @@ -9696,7 +9613,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetadataItem(_model_base.Model): +class MetadataItem(_Model): """The Batch service does not assign any meaning to this metadata; it is solely for the use of user code. @@ -9730,7 +9647,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MountConfiguration(_model_base.Model): +class MountConfiguration(_Model): """The file system to mount on each node. :ivar azure_blob_file_system_configuration: The Azure Storage Container to mount using blob @@ -9790,7 +9707,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MultiInstanceSettings(_model_base.Model): +class MultiInstanceSettings(_Model): """Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, if any of the subtasks fail (for example due to exiting with a non-zero exit code) the entire multi-instance Task fails. The multi-instance Task is then @@ -9860,7 +9777,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NameValuePair(_model_base.Model): +class NameValuePair(_Model): """Represents a name-value pair. :ivar name: The name in the name-value pair. @@ -9893,7 +9810,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NetworkConfiguration(_model_base.Model): +class NetworkConfiguration(_Model): """The network configuration for a Pool. :ivar subnet_id: The ARM resource identifier of the virtual network subnet which the Compute @@ -9994,7 +9911,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NetworkSecurityGroupRule(_model_base.Model): +class NetworkSecurityGroupRule(_Model): """A network security group rule to apply to an inbound endpoint. :ivar priority: The priority for this rule. Priorities within a Pool must be unique and are @@ -10068,7 +9985,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NfsMountConfiguration(_model_base.Model): +class NfsMountConfiguration(_Model): """Information used to connect to an NFS file system. :ivar source: The URI of the file system to mount. Required. @@ -10116,7 +10033,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class OSDisk(_model_base.Model): +class OSDisk(_Model): """Settings for the operating system disk of the compute node (VM). 
     :ivar ephemeral_os_disk_settings: Specifies the ephemeral Disk Settings for the operating
@@ -10181,7 +10098,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class OutputFile(_model_base.Model):
+class OutputFile(_Model):
     """On each file upload, the Batch service writes two log files to the compute node,
     'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a
     specific failure.
@@ -10252,7 +10169,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class OutputFileBlobContainerDestination(_model_base.Model):
+class OutputFileBlobContainerDestination(_Model):
     """Specifies a file upload destination within an Azure blob storage container.

     :ivar path: The destination blob or virtual directory within the Azure Storage container. If
@@ -10324,7 +10241,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class OutputFileDestination(_model_base.Model):
+class OutputFileDestination(_Model):
     """The destination to which a file should be uploaded.

     :ivar container: A location in Azure blob storage to which files are uploaded.
@@ -10354,7 +10271,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class OutputFileUploadConfig(_model_base.Model):
+class OutputFileUploadConfig(_Model):
     """Options for an output file upload operation, including under what conditions to perform the
     upload.
@@ -10389,7 +10306,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class PublicIpAddressConfiguration(_model_base.Model):
+class PublicIpAddressConfiguration(_Model):
     """The public IP Address configuration of the networking configuration of a Pool.

     :ivar ip_address_provisioning_type: The provisioning type for Public IP Addresses for the Pool.
@@ -10439,7 +10356,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class RecentBatchJob(_model_base.Model):
+class RecentBatchJob(_Model):
     """Information about the most recent Job to run under the Job Schedule.

     :ivar id: The ID of the Job.
@@ -10472,7 +10389,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class ResizeError(_model_base.Model):
+class ResizeError(_Model):
     """An error that occurred when resizing a Pool.

     :ivar code: An identifier for the Pool resize error. Codes are invariant and are intended to be
@@ -10516,7 +10433,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class ResourceFile(_model_base.Model):
+class ResourceFile(_Model):
     """A single file or multiple files to be downloaded to a Compute Node.

     :ivar auto_storage_container_name: The storage container name in the auto storage Account. The
@@ -10636,7 +10553,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class RollingUpgradePolicy(_model_base.Model):
+class RollingUpgradePolicy(_Model):
     """The configuration parameters used while performing a rolling upgrade.

     :ivar enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when constructing upgrade
@@ -10747,7 +10664,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class SecurityProfile(_model_base.Model):
+class SecurityProfile(_Model):
     """Specifies the security profile settings for the virtual machine or virtual machine scale
     set.
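
The four `OutputFile*` models renamed above nest into one another. A small sketch of the usual shape, assuming a placeholder container SAS URL (not a working value):

from azure.batch.models import (
    OutputFile,
    OutputFileBlobContainerDestination,
    OutputFileDestination,
    OutputFileUploadConfig,
)

# Upload everything under the task's wd/ directory to a blob container
# once the task completes, regardless of success or failure.
task_output = OutputFile(
    file_pattern="wd/**",
    destination=OutputFileDestination(
        container=OutputFileBlobContainerDestination(
            container_url="https://<account>.blob.core.windows.net/<container>?<sas>",
            path="task-output",
        )
    ),
    upload_options=OutputFileUploadConfig(upload_condition="taskcompletion"),
)
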
     :ivar encryption_at_host: This property can be used by the user in the request to enable or disable
@@ -10810,7 +10727,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class ServiceArtifactReference(_model_base.Model):
+class ServiceArtifactReference(_Model):
     """Specifies the service artifact reference id used to set the same image version for all
     virtual machines in the scale set when using 'latest' image version.
@@ -10845,7 +10762,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UefiSettings(_model_base.Model):
+class UefiSettings(_Model):
     """Specifies the security settings like secure boot and vTPM used while creating the virtual
     machine.
@@ -10884,7 +10801,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UpgradePolicy(_model_base.Model):
+class UpgradePolicy(_Model):
     """Describes an upgrade policy - automatic, manual, or rolling.

     :ivar mode: Specifies the mode of an upgrade to virtual machines in the scale set.
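
`SecurityProfile` and `UefiSettings` from the hunks above combine as follows. This is a sketch assuming the trusted-launch security type; the specific values are illustrative, not defaults:

from azure.batch.models import SecurityProfile, UefiSettings

# Trusted-launch security settings for Pool VMs.
security = SecurityProfile(
    security_type="trustedLaunch",
    encryption_at_host=True,
    uefi_settings=UefiSettings(
        secure_boot_enabled=True,
        v_tpm_enabled=True,
    ),
)
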

@@ -10939,7 +10856,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UploadBatchServiceLogsContent(_model_base.Model):
+class UploadBatchServiceLogsContent(_Model):
     """The Azure Batch service log files upload parameters for a Compute Node.

     :ivar container_url: The URL of the container within Azure Blob Storage to which to upload the
@@ -11013,7 +10930,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UploadBatchServiceLogsResult(_model_base.Model):
+class UploadBatchServiceLogsResult(_Model):
     """The result of uploading Batch service log files from a specific Compute Node.

     :ivar virtual_directory_name: The virtual directory within Azure Blob Storage container to
@@ -11055,7 +10972,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UserAccount(_model_base.Model):
+class UserAccount(_Model):
     """Properties used to create a user Account used to execute Tasks on an Azure Batch Compute
     Node.
@@ -11121,7 +11038,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UserAssignedIdentity(_model_base.Model):
+class UserAssignedIdentity(_Model):
     """The user assigned Identity.

     :ivar resource_id: The ARM resource id of the user assigned identity. Required.
@@ -11157,7 +11074,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class UserIdentity(_model_base.Model):
+class UserIdentity(_Model):
     """The definition of the user identity under which the Task is run. Specify either the
     userName or autoUser property, but not both.
@@ -11197,7 +11114,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class VirtualMachineConfiguration(_model_base.Model):
+class VirtualMachineConfiguration(_Model):
     """The configuration for Compute Nodes in a Pool based on the Azure Virtual Machines
     infrastructure.
@@ -11375,7 +11292,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class VirtualMachineInfo(_model_base.Model):
+class VirtualMachineInfo(_Model):
     """Info about the current state of the virtual machine.

     :ivar image_reference: The reference to the Azure Virtual Machine's Marketplace Image.
@@ -11415,7 +11332,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class VMDiskSecurityProfile(_model_base.Model):
+class VMDiskSecurityProfile(_Model):
     """Specifies the security profile settings for the managed disk. **Note**: It can only be set
     for Confidential VMs and is required when using Confidential VMs.
@@ -11453,7 +11370,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class VMExtension(_model_base.Model):
+class VMExtension(_Model):
     """The configuration for virtual machine extensions.

     :ivar name: The name of the virtual machine extension. Required.
@@ -11541,7 +11458,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class VMExtensionInstanceView(_model_base.Model):
+class VMExtensionInstanceView(_Model):
     """The vm extension instance view.

     :ivar name: The name of the vm extension instance view.
@@ -11583,7 +11500,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class WindowsConfiguration(_model_base.Model):
+class WindowsConfiguration(_Model):
     """Windows operating system settings to apply to the virtual machine.
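
`VirtualMachineConfiguration` pairs an `ImageReference` with the node agent SKU that matches it. A sketch with a representative Ubuntu image; the publisher/offer/SKU strings are assumptions here — consult the 'List Supported Images' operation for combinations the service actually supports:

from azure.batch.models import ImageReference, VirtualMachineConfiguration

# Pick a Marketplace image and the node agent SKU built for it.
vm_config = VirtualMachineConfiguration(
    image_reference=ImageReference(
        publisher="canonical",
        offer="0001-com-ubuntu-server-jammy",
        sku="22_04-lts",
        version="latest",
    ),
    node_agent_sku_id="batch.node.ubuntu 22.04",
)
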
     :ivar enable_automatic_updates: Whether automatic updates are enabled on the virtual machine.
@@ -11615,7 +11532,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


-class WindowsUserConfiguration(_model_base.Model):
+class WindowsUserConfiguration(_Model):
     """Properties used to create a user Account on a Windows Compute Node.

     :ivar login_mode: The login mode for the user. The default is 'batch'. Known values are:
diff --git a/sdk/batch/azure-batch/azure/batch/models/_patch.py b/sdk/batch/azure-batch/azure/batch/models/_patch.py
index 6435ce16c022..8bcb627aa475 100644
--- a/sdk/batch/azure-batch/azure/batch/models/_patch.py
+++ b/sdk/batch/azure-batch/azure/batch/models/_patch.py
@@ -1,123 +1,16 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
 """Customize generated code here.

 Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
 """
-import datetime
-from typing import List, Any, Optional
+from typing import List

-from azure.core.exceptions import HttpResponseError
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level

-from ._models import BatchPoolReplaceContent as BatchPoolReplaceContentGenerated
-from .._model_base import rest_field
-
-__all__: List[str] = [
-    "CreateTasksError",
-    "BatchFileProperties",
-]  # Add all objects you want publicly available to users at this package level
-
-
-class CreateTasksError(HttpResponseError):
-    """Aggregate Exception containing details for any failures from a task add operation.
-
-    :param str message: Error message describing exit reason
-    :param [~TaskAddParameter] pending_tasks: List of tasks remaining to be submitted.
-    :param [~TaskAddResult] failure_tasks: List of tasks which failed to add
-    :param [~Exception] errors: List of unknown errors forcing early termination
-    """
-
-    def __init__(self, pending_tasks=[], failure_tasks=[], errors=[]):
-        self.pending_tasks = pending_tasks
-        self.failure_tasks = failure_tasks
-        self.errors = errors
-        if failure_tasks and errors:
-            self.message = (
-                "Multiple errors encountered. Check the `failure_tasks` and "
-                "`errors` properties for additional details."
-            )
-        elif errors:
-            if len(errors) > 1:
-                self.message = (
-                    "Multiple errors occurred when submitting add_collection "
-                    "requests. Check the `errors` property for the inner "
-                    "exceptions."
-                )
-            else:
-                self.message = str(errors[0])
-        elif failure_tasks:
-            if len(failure_tasks) > 1:
-                self.message = (
-                    "Multiple client side errors occurred when adding the "
-                    "tasks. Check the `failure_tasks` property for details on"
-                    " these tasks."
-                )
-            else:
-                result = failure_tasks[0]
-                self.message = "Task with id `%s` failed due to client error - %s::%s" % (
-                    result.task_id,
-                    result.error.code,
-                    result.error.message,
-                )
-        super(CreateTasksError, self).__init__(self.message)
-
-
-class BatchFileProperties:
-    """Information about a file or directory on a Compute Node with additional properties.
-
-    :ivar url: The URL of the file.
-    :vartype url: str
-    :ivar is_directory: Whether the object represents a directory.
-    :vartype is_directory: bool
-    :ivar creation_time: The file creation time. The creation time is not returned for files on
-        Linux Compute Nodes.
-    :vartype creation_time: ~datetime.datetime
-    :ivar last_modified: The time at which the file was last modified. Required.
-    :vartype last_modified: ~datetime.datetime
-    :ivar content_length: The length of the file. Required.
-    :vartype content_length: int
-    :ivar content_type: The content type of the file.
-    :vartype content_type: str
-    :ivar file_mode: The file mode attribute in octal format. The file mode is returned only for
-        files on Linux Compute Nodes.
-    :vartype file_mode: str
-    """
-
-    url: Optional[str]
-    """The URL of the file."""
-    is_directory: Optional[bool]
-    """Whether the object represents a directory."""
-    creation_time: Optional[datetime.datetime]
-    """The file creation time. The creation time is not returned for files on Linux Compute Nodes."""
-    last_modified: datetime.datetime
-    """The time at which the file was last modified. Required."""
-    content_length: int
-    """The length of the file. Required."""
-    content_type: Optional[str]
-    """The content type of the file."""
-    file_mode: Optional[str]
-    """The file mode attribute in octal format. The file mode is returned only for files on Linux
-    Compute Nodes."""
-
-    def __init__(
-        self,
-        *,
-        url: Optional[str] = None,
-        is_directory: Optional[bool] = None,
-        last_modified: datetime.datetime,
-        content_length: int,
-        creation_time: Optional[datetime.datetime] = None,
-        content_type: Optional[str] = None,
-        file_mode: Optional[str] = None,
-    ) -> None:
-        self.url = url
-        self.is_directory = is_directory
-        self.creation_time = creation_time
-        self.last_modified = last_modified
-        self.content_length = content_length
-        self.content_type = content_type
-        self.file_mode = file_mode


 def patch_sdk():
     """Do not remove from this file.
diff --git a/sdk/batch/azure-batch/setup.py b/sdk/batch/azure-batch/setup.py
index c8218eb0bece..82d04d01a4e4 100644
--- a/sdk/batch/azure-batch/setup.py
+++ b/sdk/batch/azure-batch/setup.py
@@ -5,7 +5,7 @@
 # Code generated by Microsoft (R) Python Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
-# coding: utf-8
+

 import os
 import re
@@ -29,7 +29,7 @@
 setup(
     name=PACKAGE_NAME,
     version=version,
-    description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
+    description="Microsoft Corporation {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
     long_description=open("README.md", "r").read(),
     long_description_content_type="text/markdown",
     license="MIT License",
@@ -42,7 +42,6 @@
         "Programming Language :: Python",
         "Programming Language :: Python :: 3 :: Only",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         "Programming Language :: Python :: 3.11",
@@ -66,5 +65,5 @@
         "azure-core>=1.30.0",
         "typing-extensions>=4.6.0",
     ],
-    python_requires=">=3.8",
+    python_requires=">=3.9",
 )
diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py
index 51e33cb2465b..cf98cfbd6c49 100644
--- a/sdk/batch/azure-batch/tests/test_batch.py
+++ b/sdk/batch/azure-batch/tests/test_batch.py
@@ -1,4 +1,4 @@
-# pylint: disable=too-many-lines
+# pylint: disable=too-many-lines,line-too-long,useless-suppression
 # coding: utf-8

 # -------------------------------------------------------------------------
@@ -85,7 +85,7 @@ async def assertCreateTasksError(self, code, func, *args, **kwargs):
             pytest.fail("Inner BatchErrorException expected but not exist")
         except Exception as err:
             pytest.fail("Expected CreateTasksError, instead got: {!r}".format(err))
-
+
     @CachedResourceGroupPreparer(location=AZURE_LOCATION)
     @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT)
     @pytest.mark.parametrize("BatchClient", [SyncBatchClient, AsyncBatchClient], ids=["sync", "async"])
diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml
index 96f30af8578f..e6110e0b4c38 100644
--- a/sdk/batch/azure-batch/tsp-location.yaml
+++ b/sdk/batch/azure-batch/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/batch/Azure.Batch
-commit: e7ed63002df7e9cc1d3e4cd139d76c4d7040acd3
+commit: 6a439da0fa799a27aef35d4fac4a29455cc86fec
 repo: Azure/azure-rest-api-specs
 additionalDirectories:
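
Given the floor bump from Python 3.8 to 3.9 in setup.py above (both in the classifiers and in python_requires), a trivial guard a downstream script might add; the error message is illustrative:

import sys

# Mirrors python_requires=">=3.9" from setup.py.
if sys.version_info < (3, 9):
    raise RuntimeError("azure-batch now requires Python 3.9 or newer")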